From dfe9d6aebc25f7c5e9a3f9e6ad31d56b50d1a309 Mon Sep 17 00:00:00 2001
From: liord <lior.dikstein@altair-semi.com>
Date: Tue, 7 Jan 2025 12:38:27 +0200
Subject: [PATCH 01/18] Rename
 TargetPlatformCapabilities-->FrameworkQuantizationCapabilities

---
 docsrc/source/api/api_docs/index.rst           |  2 +-
 .../get_target_platform_capabilities.rst       |  2 +-
 .../api/api_docs/modules/target_platform.rst   |  6 +++---
 .../core/common/fusion/layer_fusing.py         |  6 +++---
 .../core/common/graph/base_graph.py            |  6 +++---
 .../core/common/graph/base_node.py             |  6 +++---
 .../mixed_precision_candidates_filter.py       |  6 +++---
 .../resource_utilization_data.py               | 10 +++++-----
 .../common/pruning/greedy_mask_calculator.py   |  6 +++---
 .../common/pruning/mask/per_channel_mask.py    |  2 +-
 .../common/pruning/mask/per_simd_group_mask.py |  2 +-
 .../core/common/pruning/pruner.py              |  6 +++---
 .../set_node_quantization_config.py            |  8 ++++----
 .../core/graph_prep_runner.py                  | 14 +++++++-------
 .../keras/resource_utilization_data_facade.py  |  4 ++--
 .../resource_utilization_data_facade.py        |  4 ++--
 model_compression_toolkit/core/runner.py       |  6 +++---
 .../gptq/keras/quantization_facade.py          |  4 ++--
 .../gptq/pytorch/quantization_facade.py        |  6 +++---
 model_compression_toolkit/metadata.py          |  4 ++--
 .../pruning/keras/pruning_facade.py            |  4 ++--
 .../pruning/pytorch/pruning_facade.py          |  4 ++--
 .../ptq/keras/quantization_facade.py           |  4 ++--
 .../ptq/pytorch/quantization_facade.py         |  6 +++---
 .../qat/keras/quantization_facade.py           |  4 ++--
 .../qat/pytorch/quantization_facade.py         |  4 ++--
 .../target_platform/__init__.py                |  2 +-
 .../targetplatform2framework/__init__.py       |  2 +-
 .../targetplatform2framework/attach2fw.py      |  8 ++++----
 .../targetplatform2framework/current_tpc.py    | 16 ++++++++--------
 .../operations_to_layers.py                    |  4 ++--
 .../get_target_platform_capabilities.py        |  2 +-
 .../xquant/common/model_folding_utils.py       |  4 ++--
 .../helpers/generate_test_tp_model.py          |  4 ++--
 .../tflite_int8/imx500_int8_tp_model.py        |  2 +-
 .../second_moment_correction_test.py           |  4 ++--
 .../function_tests/test_custom_layer.py        |  4 ++--
 .../non_parallel_tests/test_keras_tp_model.py  | 18 +++++++++---------
 .../function_tests/test_pytorch_tp_model.py    | 16 ++++++++--------
 .../mixed_precision_activation_test.py         |  2 +-
 .../mixed_precision_weights_test.py            |  2 +-
 .../multi_head_attention_test.py               |  2 +-
 .../second_moment_correction_test.py           |  4 ++--
 ...ple_keras_activation_threshold_search.ipynb |  2 +-
 ...le_keras_activation_z_score_threshold.ipynb |  2 +-
 ...ample_keras_mobilenet_mixed_precision.ipynb |  2 +-
 ...mple_keras_post-training_quantization.ipynb |  2 +-
 .../keras/example_keras_pruning_mnist.ipynb    |  2 +-
 .../keras/example_keras_qat.ipynb              |  6 +++---
 .../example_pytorch_mixed_precision_ptq.ipynb  |  2 +-
 ...le_pytorch_post_training_quantization.ipynb |  2 +-
 51 files changed, 126 insertions(+), 126 deletions(-)

diff --git a/docsrc/source/api/api_docs/index.rst b/docsrc/source/api/api_docs/index.rst
index 0c4433163..1e3468fb4 100644
--- a/docsrc/source/api/api_docs/index.rst
+++ b/docsrc/source/api/api_docs/index.rst
@@ -110,7 +110,7 @@ target_platform
 ================
 - :ref:`target_platform<ug-target_platform>`: Module to create and model hardware-related settings to optimize the model according to, by the hardware the optimized model will use during inference.
 - :ref:`get_target_platform_capabilities<ug-get_target_platform_capabilities>`: A function to get a target platform model for Tensorflow and Pytorch.
-- :ref:`DefaultDict<ug-DefaultDict>`: Util class for creating a TargetPlatformCapabilities.
+- :ref:`DefaultDict<ug-DefaultDict>`: Util class for creating a FrameworkQuantizationCapabilities.
 
 
 Indices and tables
diff --git a/docsrc/source/api/api_docs/methods/get_target_platform_capabilities.rst b/docsrc/source/api/api_docs/methods/get_target_platform_capabilities.rst
index cc623b66a..e8346a359 100644
--- a/docsrc/source/api/api_docs/methods/get_target_platform_capabilities.rst
+++ b/docsrc/source/api/api_docs/methods/get_target_platform_capabilities.rst
@@ -4,7 +4,7 @@
 
 
 =======================================
-Get TargetPlatformCapabilities
+Get FrameworkQuantizationCapabilities
 =======================================
 
 .. autofunction:: model_compression_toolkit.get_target_platform_capabilities
diff --git a/docsrc/source/api/api_docs/modules/target_platform.rst b/docsrc/source/api/api_docs/modules/target_platform.rst
index c393cb21a..37733ec66 100644
--- a/docsrc/source/api/api_docs/modules/target_platform.rst
+++ b/docsrc/source/api/api_docs/modules/target_platform.rst
@@ -24,7 +24,7 @@ Models for IMX500, TFLite and qnnpack can be observed `here <https://github.com/
 
 |
 
-The object MCT should get called TargetPlatformCapabilities (or shortly TPC).
+The object MCT should get is called FrameworkQuantizationCapabilities (or TPC for short).
 This diagram demonstrates the main components:
 
 .. image:: ../../../../images/tpc.jpg
@@ -95,9 +95,9 @@ LayerFilterParams
 More filters and usage examples are detailed :ref:`here<ug-layer_filters>`.
 
 
-TargetPlatformCapabilities
-=============================
+FrameworkQuantizationCapabilities
+=================================
-.. autoclass:: model_compression_toolkit.target_platform.TargetPlatformCapabilities
+.. autoclass:: model_compression_toolkit.target_platform.FrameworkQuantizationCapabilities
 
 
 
diff --git a/model_compression_toolkit/core/common/fusion/layer_fusing.py b/model_compression_toolkit/core/common/fusion/layer_fusing.py
index b11bf6d49..0ae75941b 100644
--- a/model_compression_toolkit/core/common/fusion/layer_fusing.py
+++ b/model_compression_toolkit/core/common/fusion/layer_fusing.py
@@ -16,7 +16,7 @@
 from typing import Any, List
 from model_compression_toolkit.core.common.graph.base_graph import Graph
 from model_compression_toolkit.core.common.graph.base_node import BaseNode
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.layer_filter_params import LayerFilterParams
 
 
@@ -77,14 +77,14 @@ def disable_nodes_activation_quantization(nodes: List[BaseNode]):
             qc.activation_quantization_cfg.enable_activation_quantization = False
 
 
-def fusion(graph: Graph, tpc: TargetPlatformCapabilities) -> Graph:
+def fusion(graph: Graph, tpc: FrameworkQuantizationCapabilities) -> Graph:
     """
     Fusing defines a list of operators that should be combined and treated as a single operator,
     hence no quantization is applied between them when they appear in the graph.
     This function search and disable quantization for such patterns.
     Args:
         graph: Graph we apply the fusion on.
-        tpc: TargetPlatformCapabilities object that describes the desired inference target platform (includes fusing patterns MCT should handle).
+        tpc: FrameworkQuantizationCapabilities object that describes the desired inference target platform (includes fusing patterns MCT should handle).
     Returns:
         Graph after applying fusion activation marking.
     """
diff --git a/model_compression_toolkit/core/common/graph/base_graph.py b/model_compression_toolkit/core/common/graph/base_graph.py
index 432a81f39..b6abe7218 100644
--- a/model_compression_toolkit/core/common/graph/base_graph.py
+++ b/model_compression_toolkit/core/common/graph/base_graph.py
@@ -33,7 +33,7 @@
 from model_compression_toolkit.core.common.user_info import UserInformation
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import \
-    TargetPlatformCapabilities, LayerFilterParams
+    FrameworkQuantizationCapabilities, LayerFilterParams
 
 OutTensor = namedtuple('OutTensor', 'node node_out_index')
 
@@ -87,11 +87,11 @@ def set_fw_info(self,
         self.fw_info = fw_info
 
     def set_tpc(self,
-                tpc: TargetPlatformCapabilities):
+                tpc: FrameworkQuantizationCapabilities):
         """
         Set the graph's TPC.
         Args:
-            tpc: TargetPlatformCapabilities object.
+            tpc: FrameworkQuantizationCapabilities object.
         """
         # validate graph nodes are either from the framework or a custom layer defined in the TPC
         # Validate graph nodes are either built-in layers from the framework or custom layers defined in the TPC
diff --git a/model_compression_toolkit/core/common/graph/base_node.py b/model_compression_toolkit/core/common/graph/base_node.py
index 67c4f2f57..3310d1d52 100644
--- a/model_compression_toolkit/core/common/graph/base_node.py
+++ b/model_compression_toolkit/core/common/graph/base_node.py
@@ -25,7 +25,7 @@
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import QuantizationConfigOptions, \
     OpQuantizationConfig
 from model_compression_toolkit.target_platform_capabilities.schema.schema_functions import max_input_activation_n_bits
-from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities, LayerFilterParams
+from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities, LayerFilterParams
 
 
 class BaseNode:
@@ -536,7 +536,7 @@ def get_all_weights_attr_candidates(self, attr: str) -> List[WeightsAttrQuantiza
         # the inner method would log an exception.
         return [c.weights_quantization_cfg.get_attr_config(attr) for c in self.candidates_quantization_cfg]
 
-    def get_qco(self, tpc: TargetPlatformCapabilities) -> QuantizationConfigOptions:
+    def get_qco(self, tpc: FrameworkQuantizationCapabilities) -> QuantizationConfigOptions:
         """
         Get the QuantizationConfigOptions of the node according
         to the mappings from layers/LayerFilterParams to the OperatorsSet in the TargetPlatformModel.
@@ -563,7 +563,7 @@ def get_qco(self, tpc: TargetPlatformCapabilities) -> QuantizationConfigOptions:
                 Logger.critical(f"Found duplicate qco types for node '{self.name}' of type '{self.type}'!")  # pragma: no cover
         return tpc.tp_model.default_qco
 
-    def filter_node_qco_by_graph(self, tpc: TargetPlatformCapabilities,
+    def filter_node_qco_by_graph(self, tpc: FrameworkQuantizationCapabilities,
                                  next_nodes: List, node_qc_options: QuantizationConfigOptions
                                  ) -> Tuple[OpQuantizationConfig, List[OpQuantizationConfig]]:
         """
diff --git a/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py b/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py
index 3abde76b7..146724e3c 100644
--- a/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py
+++ b/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py
@@ -17,13 +17,13 @@
 from model_compression_toolkit.core import ResourceUtilization, FrameworkInfo
 from model_compression_toolkit.core.common import Graph
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 
 
 def filter_candidates_for_mixed_precision(graph: Graph,
                                           target_resource_utilization: ResourceUtilization,
                                           fw_info: FrameworkInfo,
-                                          tpc: TargetPlatformCapabilities):
+                                          tpc: FrameworkQuantizationCapabilities):
     """
     Filters out candidates in case of mixed precision search for only weights or activation compression.
     For instance, if running only weights compression - filters out candidates of activation configurable nodes
@@ -36,7 +36,7 @@ def filter_candidates_for_mixed_precision(graph: Graph,
         graph: A graph representation of the model to be quantized.
         target_resource_utilization: The resource utilization of the target device.
         fw_info: fw_info: Information needed for quantization about the specific framework.
-        tpc: TargetPlatformCapabilities object that describes the desired inference target platform.
+        tpc: FrameworkQuantizationCapabilities object that describes the desired inference target platform.
 
     """
 
diff --git a/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py b/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py
index a647a2cc5..760a0a2c3 100644
--- a/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py
+++ b/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py
@@ -25,7 +25,7 @@
 from model_compression_toolkit.core.common.framework_implementation import FrameworkImplementation
 from model_compression_toolkit.core.common.graph.edge import EDGE_SINK_INDEX
 from model_compression_toolkit.core.graph_prep_runner import graph_preparation_runner
-from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import QuantizationConfigOptions
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.ru_methods import calc_graph_cuts
 
@@ -33,7 +33,7 @@
 def compute_resource_utilization_data(in_model: Any,
                                       representative_data_gen: Callable,
                                       core_config: CoreConfig,
-                                      tpc: TargetPlatformCapabilities,
+                                      tpc: FrameworkQuantizationCapabilities,
                                       fw_info: FrameworkInfo,
                                       fw_impl: FrameworkImplementation,
                                       transformed_graph: Graph = None,
@@ -47,7 +47,7 @@ def compute_resource_utilization_data(in_model: Any,
         in_model:  Model to build graph from (the model that intended to be quantized).
         representative_data_gen: Dataset used for calibration.
         core_config: CoreConfig containing parameters of how the model should be quantized.
-        tpc: TargetPlatformCapabilities object that models the inference target platform and
+        tpc: FrameworkQuantizationCapabilities object that models the inference target platform and
                                               the attached framework operator's information.
         fw_info: Information needed for quantization about the specific framework.
         fw_impl: FrameworkImplementation object with a specific framework methods implementation.
@@ -246,7 +246,7 @@ def requires_mixed_precision(in_model: Any,
                              target_resource_utilization: ResourceUtilization,
                              representative_data_gen: Callable,
                              core_config: CoreConfig,
-                             tpc: TargetPlatformCapabilities,
+                             tpc: FrameworkQuantizationCapabilities,
                              fw_info: FrameworkInfo,
                              fw_impl: FrameworkImplementation) -> bool:
     """
@@ -261,7 +261,7 @@ def requires_mixed_precision(in_model: Any,
         target_resource_utilization: The resource utilization of the target device.
         representative_data_gen: A function that generates representative data for the model.
         core_config: CoreConfig containing parameters of how the model should be quantized.
-        tpc: TargetPlatformCapabilities object that models the inference target platform and
+        tpc: FrameworkQuantizationCapabilities object that models the inference target platform and
                                               the attached framework operator's information.
         fw_info: Information needed for quantization about the specific framework.
         fw_impl: FrameworkImplementation object with a specific framework methods implementation.
diff --git a/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py b/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py
index 8e583a537..e6367c4c6 100644
--- a/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py
+++ b/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py
@@ -24,7 +24,7 @@
 from model_compression_toolkit.core.common.pruning.pruning_framework_implementation import PruningFrameworkImplementation
 from model_compression_toolkit.core.common.pruning.mask.per_simd_group_mask import PerSIMDGroupMask
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 
 
 class GreedyMaskCalculator:
@@ -42,7 +42,7 @@ def __init__(self,
                  target_resource_utilization: ResourceUtilization,
                  graph: Graph,
                  fw_impl: PruningFrameworkImplementation,
-                 tpc: TargetPlatformCapabilities,
+                 tpc: FrameworkQuantizationCapabilities,
                  simd_groups_indices: Dict[BaseNode, List[List[int]]]):
         """
         Args:
@@ -52,7 +52,7 @@ def __init__(self,
             target_resource_utilization (ResourceUtilization): The target resource utilization to achieve.
             graph (Graph): The computational graph of the model.
             fw_impl (PruningFrameworkImplementation): Framework-specific implementation details.
-            tpc (TargetPlatformCapabilities): Platform-specific constraints and capabilities.
+            tpc (FrameworkQuantizationCapabilities): Platform-specific constraints and capabilities.
             simd_groups_indices (Dict[BaseNode, List[List[int]]]): Indices of SIMD groups in each node.
         """
         self.prunable_nodes = prunable_nodes
diff --git a/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py b/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py
index c2f75d99a..2cbf47af5 100644
--- a/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py
+++ b/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py
@@ -23,7 +23,7 @@
 from model_compression_toolkit.core.common.pruning.memory_calculator import MemoryCalculator
 from model_compression_toolkit.core.common.pruning.pruning_framework_implementation import PruningFrameworkImplementation
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 
 class MaskIndicator(Enum):
     """
diff --git a/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py b/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py
index e8b76d330..79c03336d 100644
--- a/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py
+++ b/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py
@@ -23,7 +23,7 @@
 from model_compression_toolkit.core.common.pruning.memory_calculator import MemoryCalculator
 from model_compression_toolkit.core.common.pruning.pruning_framework_implementation import PruningFrameworkImplementation
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 
 class PerSIMDGroupMask:
     def __init__(self,
diff --git a/model_compression_toolkit/core/common/pruning/pruner.py b/model_compression_toolkit/core/common/pruning/pruner.py
index 3b1509f33..8e2de586a 100644
--- a/model_compression_toolkit/core/common/pruning/pruner.py
+++ b/model_compression_toolkit/core/common/pruning/pruner.py
@@ -29,7 +29,7 @@
 from model_compression_toolkit.core.common.pruning.pruning_info import PruningInfo, \
     unroll_simd_scores_to_per_channel_scores
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 
 class Pruner:
     """
@@ -43,7 +43,7 @@ def __init__(self,
                  target_resource_utilization: ResourceUtilization,
                  representative_data_gen: Callable,
                  pruning_config: PruningConfig,
-                 target_platform_capabilities: TargetPlatformCapabilities):
+                 target_platform_capabilities: FrameworkQuantizationCapabilities):
         """
         Args:
             float_graph (Graph): The floating-point representation of the model's computation graph.
@@ -52,7 +52,7 @@ def __init__(self,
             target_resource_utilization (ResourceUtilization): The target resource utilization to be achieved after pruning.
             representative_data_gen (Callable): Generator function for representative dataset used in pruning analysis.
             pruning_config (PruningConfig): Configuration object specifying how pruning should be performed.
-            target_platform_capabilities (TargetPlatformCapabilities): Object encapsulating the capabilities of the target hardware platform.
+            target_platform_capabilities (FrameworkQuantizationCapabilities): Object encapsulating the capabilities of the target hardware platform.
         """
         self.float_graph = float_graph
         self.fw_info = fw_info
diff --git a/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py b/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py
index 93045cdd6..2b9ffe5b6 100644
--- a/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py
+++ b/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py
@@ -33,7 +33,7 @@
 from model_compression_toolkit.core.common.quantization.quantization_fn_selection import \
     get_weights_quantization_fn
 from model_compression_toolkit.target_platform_capabilities.schema.schema_functions import max_input_activation_n_bits
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import OpQuantizationConfig, \
     QuantizationConfigOptions
 
@@ -78,7 +78,7 @@ def set_quantization_configuration_to_graph(graph: Graph,
 
 
 def filter_node_qco_by_graph(node: BaseNode,
-                             tpc: TargetPlatformCapabilities,
+                             tpc: FrameworkQuantizationCapabilities,
                              graph: Graph,
                              node_qc_options: QuantizationConfigOptions
                              ) -> Tuple[OpQuantizationConfig, List[OpQuantizationConfig]]:
@@ -147,7 +147,7 @@ def set_quantization_configs_to_node(node: BaseNode,
                                      graph: Graph,
                                      quant_config: QuantizationConfig,
                                      fw_info: FrameworkInfo,
-                                     tpc: TargetPlatformCapabilities,
+                                     tpc: FrameworkQuantizationCapabilities,
                                      mixed_precision_enable: bool = False,
                                      manual_bit_width_override: Optional[int] = None):
     """
@@ -158,7 +158,7 @@ def set_quantization_configs_to_node(node: BaseNode,
         graph (Graph): Model's internal representation graph.
         quant_config (QuantizationConfig): Quantization configuration to generate the node's configurations from.
         fw_info (FrameworkInfo): Information needed for quantization about the specific framework.
-        tpc (TargetPlatformCapabilities): TargetPlatformCapabilities to get default OpQuantizationConfig.
+        tpc (FrameworkQuantizationCapabilities): FrameworkQuantizationCapabilities to get default OpQuantizationConfig.
         mixed_precision_enable (bool): Whether mixed precision is enabled. Defaults to False.
         manual_bit_width_override (Optional[int]): Specifies a custom bit-width to override the node's activation bit-width. Defaults to None.
     """
diff --git a/model_compression_toolkit/core/graph_prep_runner.py b/model_compression_toolkit/core/graph_prep_runner.py
index 6fdd3bf36..cdeb42901 100644
--- a/model_compression_toolkit/core/graph_prep_runner.py
+++ b/model_compression_toolkit/core/graph_prep_runner.py
@@ -29,7 +29,7 @@
 from model_compression_toolkit.core.common.substitutions.apply_substitutions import substitute
 from model_compression_toolkit.core.common.substitutions.linear_collapsing_substitution import \
     linear_collapsing_substitute
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.common.visualization.tensorboard_writer import TensorboardWriter
 
 
@@ -38,7 +38,7 @@ def graph_preparation_runner(in_model: Any,
                              quantization_config: QuantizationConfig,
                              fw_info: FrameworkInfo,
                              fw_impl: FrameworkImplementation,
-                             tpc: TargetPlatformCapabilities,
+                             tpc: FrameworkQuantizationCapabilities,
                              bit_width_config: BitWidthConfig = None,
                              tb_w: TensorboardWriter = None,
                              mixed_precision_enable: bool = False,
@@ -58,7 +58,7 @@ def graph_preparation_runner(in_model: Any,
         fw_info (FrameworkInfo): Information needed for quantization about the specific framework (e.g., kernel channels indices,
             groups of layers by how they should be quantized, etc.).
         fw_impl (FrameworkImplementation): FrameworkImplementation object with a specific framework methods implementation.
-        tpc (TargetPlatformCapabilities): TargetPlatformCapabilities object that models the inference target platform and
+        tpc (FrameworkQuantizationCapabilities): FrameworkQuantizationCapabilities object that models the inference target platform and
             the attached framework operator's information.
         bit_width_config (BitWidthConfig): Config for bit-width selection. Defaults to None.
         tb_w (TensorboardWriter): TensorboardWriter object for logging.
@@ -92,7 +92,7 @@ def graph_preparation_runner(in_model: Any,
 
 
 def get_finalized_graph(initial_graph: Graph,
-                        tpc: TargetPlatformCapabilities,
+                        tpc: FrameworkQuantizationCapabilities,
                         quant_config: QuantizationConfig = DEFAULTCONFIG,
                         bit_width_config: BitWidthConfig = None,
                         fw_info: FrameworkInfo = None,
@@ -106,7 +106,7 @@ def get_finalized_graph(initial_graph: Graph,
 
     Args:
         initial_graph (Graph): Graph to apply the changes to.
-        tpc (TargetPlatformCapabilities): TargetPlatformCapabilities object that describes the desired inference target platform (includes fusing patterns MCT should handle).
+        tpc (FrameworkQuantizationCapabilities): FrameworkQuantizationCapabilities object that describes the desired inference target platform (includes fusing patterns MCT should handle).
         quant_config (QuantizationConfig): QuantizationConfig containing parameters of how the model should be
             quantized.
         bit_width_config (BitWidthConfig): Config for bit-width selection. Defaults to None.
@@ -185,7 +185,7 @@ def get_finalized_graph(initial_graph: Graph,
 
 def read_model_to_graph(in_model: Any,
                         representative_data_gen: Callable,
-                        tpc: TargetPlatformCapabilities,
+                        tpc: FrameworkQuantizationCapabilities,
                         fw_info: FrameworkInfo = None,
                         fw_impl: FrameworkImplementation = None) -> Graph:
 
@@ -195,7 +195,7 @@ def read_model_to_graph(in_model: Any,
     Args:
         in_model: Model to optimize and prepare for quantization.
         representative_data_gen: Dataset used for calibration.
-        tpc: TargetPlatformCapabilities object that models the inference target platform and
+        tpc: FrameworkQuantizationCapabilities object that models the inference target platform and
                       the attached framework operator's information.
         fw_info: Information needed for quantization about the specific framework (e.g.,
                 kernel channels indices, groups of layers by how they should be quantized, etc.)
diff --git a/model_compression_toolkit/core/keras/resource_utilization_data_facade.py b/model_compression_toolkit/core/keras/resource_utilization_data_facade.py
index ffc313c37..4ad8a3409 100644
--- a/model_compression_toolkit/core/keras/resource_utilization_data_facade.py
+++ b/model_compression_toolkit/core/keras/resource_utilization_data_facade.py
@@ -19,7 +19,7 @@
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.constants import TENSORFLOW
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel
-from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization_data import compute_resource_utilization_data
 from model_compression_toolkit.verify_packages import FOUND_TF
 
@@ -51,7 +51,7 @@ def keras_resource_utilization_data(in_model: Model,
             in_model (Model): Keras model to quantize.
             representative_data_gen (Callable): Dataset used for calibration.
             core_config (CoreConfig): CoreConfig containing parameters for quantization and mixed precision of how the model should be quantized.
-            target_platform_capabilities (TargetPlatformCapabilities): TargetPlatformCapabilities to optimize the Keras model according to.
+            target_platform_capabilities (FrameworkQuantizationCapabilities): FrameworkQuantizationCapabilities to optimize the Keras model according to.
 
         Returns:
 
diff --git a/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py b/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py
index 23c1d2c4c..af4bb7e9c 100644
--- a/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py
+++ b/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py
@@ -18,7 +18,7 @@
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.constants import PYTORCH
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel
-from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization_data import compute_resource_utilization_data
 from model_compression_toolkit.core.common.quantization.core_config import CoreConfig
@@ -51,7 +51,7 @@ def pytorch_resource_utilization_data(in_model: Module,
             in_model (Model): PyTorch model to quantize.
             representative_data_gen (Callable): Dataset used for calibration.
             core_config (CoreConfig): CoreConfig containing parameters for quantization and mixed precision
-            target_platform_capabilities (TargetPlatformCapabilities): TargetPlatformCapabilities to optimize the PyTorch model according to.
+            target_platform_capabilities (FrameworkQuantizationCapabilities): FrameworkQuantizationCapabilities to optimize the PyTorch model according to.
 
         Returns:
 
diff --git a/model_compression_toolkit/core/runner.py b/model_compression_toolkit/core/runner.py
index 1948f28c2..43cc09b55 100644
--- a/model_compression_toolkit/core/runner.py
+++ b/model_compression_toolkit/core/runner.py
@@ -44,7 +44,7 @@
 from model_compression_toolkit.core.common.mixed_precision.mixed_precision_search_facade import search_bit_width
 from model_compression_toolkit.core.common.network_editors.edit_network import edit_network_graph
 from model_compression_toolkit.core.common.quantization.core_config import CoreConfig
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.common.visualization.final_config_visualizer import \
     WeightsFinalBitwidthConfigVisualizer, \
     ActivationFinalBitwidthConfigVisualizer
@@ -57,7 +57,7 @@ def core_runner(in_model: Any,
                 core_config: CoreConfig,
                 fw_info: FrameworkInfo,
                 fw_impl: FrameworkImplementation,
-                tpc: TargetPlatformCapabilities,
+                tpc: FrameworkQuantizationCapabilities,
                 target_resource_utilization: ResourceUtilization = None,
                 running_gptq: bool = False,
                 tb_w: TensorboardWriter = None):
@@ -77,7 +77,7 @@ def core_runner(in_model: Any,
         fw_info: Information needed for quantization about the specific framework (e.g., kernel channels indices,
         groups of layers by how they should be quantized, etc.).
         fw_impl: FrameworkImplementation object with a specific framework methods implementation.
-        tpc: TargetPlatformCapabilities object that models the inference target platform and
+        tpc: FrameworkQuantizationCapabilities object that models the inference target platform and
                                               the attached framework operator's information.
         target_resource_utilization: ResourceUtilization to constraint the search of the mixed-precision configuration for the model.
         tb_w: TensorboardWriter object for logging
diff --git a/model_compression_toolkit/gptq/keras/quantization_facade.py b/model_compression_toolkit/gptq/keras/quantization_facade.py
index 7d5ba1f7e..9de539c47 100644
--- a/model_compression_toolkit/gptq/keras/quantization_facade.py
+++ b/model_compression_toolkit/gptq/keras/quantization_facade.py
@@ -33,7 +33,7 @@
 from model_compression_toolkit.core.runner import core_runner
 from model_compression_toolkit.gptq.runner import gptq_runner
 from model_compression_toolkit.core.analyzer import analyzer_model_quantization
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities
 from model_compression_toolkit.metadata import create_model_metadata
 
 
@@ -179,7 +179,7 @@ def keras_gradient_post_training_quantization(in_model: Model, representative_da
             gptq_representative_data_gen (Callable): Dataset used for GPTQ training. If None defaults to representative_data_gen
             target_resource_utilization (ResourceUtilization): ResourceUtilization object to limit the search of the mixed-precision configuration as desired.
             core_config (CoreConfig): Configuration object containing parameters of how the model should be quantized, including mixed precision parameters.
-            target_platform_capabilities (TargetPlatformCapabilities): TargetPlatformCapabilities to optimize the Keras model according to.
+            target_platform_capabilities (FrameworkQuantizationCapabilities): FrameworkQuantizationCapabilities to optimize the Keras model according to.
 
         Returns:
 
diff --git a/model_compression_toolkit/gptq/pytorch/quantization_facade.py b/model_compression_toolkit/gptq/pytorch/quantization_facade.py
index 38b09aaa8..e886b48c3 100644
--- a/model_compression_toolkit/gptq/pytorch/quantization_facade.py
+++ b/model_compression_toolkit/gptq/pytorch/quantization_facade.py
@@ -32,7 +32,7 @@
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.metadata import create_model_metadata
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel
-from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.verify_packages import FOUND_TORCH
 
 
@@ -148,7 +148,7 @@ def pytorch_gradient_post_training_quantization(model: Module,
         """
         Quantize a trained Pytorch module using post-training quantization.
         By default, the module is quantized using a symmetric constraint quantization thresholds
-        (power of two) as defined in the default TargetPlatformCapabilities.
+        (power of two) as defined in the default FrameworkQuantizationCapabilities.
         The module is first optimized using several transformations (e.g. BatchNormalization folding to
         preceding layers). Then, using a given dataset, statistics (e.g. min/max, histogram, etc.) are
         being collected for each layer's output (and input, depends on the quantization configuration).
@@ -168,7 +168,7 @@ def pytorch_gradient_post_training_quantization(model: Module,
             core_config (CoreConfig): Configuration object containing parameters of how the model should be quantized, including mixed precision parameters.
             gptq_config (GradientPTQConfig): Configuration for using gptq (e.g. optimizer).
             gptq_representative_data_gen (Callable): Dataset used for GPTQ training. If None defaults to representative_data_gen
-            target_platform_capabilities (TargetPlatformCapabilities): TargetPlatformCapabilities to optimize the PyTorch model according to.
+            target_platform_capabilities (FrameworkQuantizationCapabilities): FrameworkQuantizationCapabilities to optimize the PyTorch model according to.
 
         Returns:
             A quantized module and information the user may need to handle the quantized module.
diff --git a/model_compression_toolkit/metadata.py b/model_compression_toolkit/metadata.py
index 192baad82..9e8400bb9 100644
--- a/model_compression_toolkit/metadata.py
+++ b/model_compression_toolkit/metadata.py
@@ -18,10 +18,10 @@
 from model_compression_toolkit.constants import OPERATORS_SCHEDULING, FUSED_NODES_MAPPING, CUTS, MAX_CUT, OP_ORDER, \
     OP_RECORD, SHAPE, NODE_OUTPUT_INDEX, NODE_NAME, TOTAL_SIZE, MEM_ELEMENTS
 from model_compression_toolkit.core.common.graph.memory_graph.compute_graph_max_cut import SchedulerInfo
-from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 
 
-def create_model_metadata(tpc: TargetPlatformCapabilities,
+def create_model_metadata(tpc: FrameworkQuantizationCapabilities,
                           scheduling_info: SchedulerInfo = None) -> Dict:
     """
     Creates and returns a metadata dictionary for the model, including version information
diff --git a/model_compression_toolkit/pruning/keras/pruning_facade.py b/model_compression_toolkit/pruning/keras/pruning_facade.py
index cd565c4bc..7ea806755 100644
--- a/model_compression_toolkit/pruning/keras/pruning_facade.py
+++ b/model_compression_toolkit/pruning/keras/pruning_facade.py
@@ -26,7 +26,7 @@
 from model_compression_toolkit.core.common.quantization.set_node_quantization_config import set_quantization_configuration_to_graph
 from model_compression_toolkit.core.graph_prep_runner import read_model_to_graph
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.common.quantization.quantization_config import DEFAULTCONFIG
 from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 
@@ -62,7 +62,7 @@ def keras_pruning_experimental(model: Model,
             target_resource_utilization (ResourceUtilization): The target Key Performance Indicators to be achieved through pruning.
             representative_data_gen (Callable): A function to generate representative data for pruning analysis.
             pruning_config (PruningConfig): Configuration settings for the pruning process. Defaults to standard config.
-            target_platform_capabilities (TargetPlatformCapabilities): Platform-specific constraints and capabilities. Defaults to DEFAULT_KERAS_TPC.
+            target_platform_capabilities (FrameworkQuantizationCapabilities): Platform-specific constraints and capabilities. Defaults to DEFAULT_KERAS_TPC.
 
         Returns:
             Tuple[Model, PruningInfo]: A tuple containing the pruned Keras model and associated pruning information.
diff --git a/model_compression_toolkit/pruning/pytorch/pruning_facade.py b/model_compression_toolkit/pruning/pytorch/pruning_facade.py
index 2c1b2d498..be59db610 100644
--- a/model_compression_toolkit/pruning/pytorch/pruning_facade.py
+++ b/model_compression_toolkit/pruning/pytorch/pruning_facade.py
@@ -25,7 +25,7 @@
 from model_compression_toolkit.core.common.quantization.set_node_quantization_config import set_quantization_configuration_to_graph
 from model_compression_toolkit.core.graph_prep_runner import read_model_to_graph
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.common.quantization.quantization_config import DEFAULTCONFIG
 from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 
@@ -67,7 +67,7 @@ def pytorch_pruning_experimental(model: Module,
             target_resource_utilization (ResourceUtilization): Key Performance Indicators specifying the pruning targets.
             representative_data_gen (Callable): A function to generate representative data for pruning analysis.
             pruning_config (PruningConfig): Configuration settings for the pruning process. Defaults to standard config.
-            target_platform_capabilities (TargetPlatformCapabilities): Platform-specific constraints and capabilities.
+            target_platform_capabilities (FrameworkQuantizationCapabilities): Platform-specific constraints and capabilities.
                 Defaults to DEFAULT_PYTORCH_TPC.
 
         Returns:
diff --git a/model_compression_toolkit/ptq/keras/quantization_facade.py b/model_compression_toolkit/ptq/keras/quantization_facade.py
index 97785fb33..d7d4ae682 100644
--- a/model_compression_toolkit/ptq/keras/quantization_facade.py
+++ b/model_compression_toolkit/ptq/keras/quantization_facade.py
@@ -27,7 +27,7 @@
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization
 from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \
     MixedPrecisionQuantizationConfig
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.runner import core_runner
 from model_compression_toolkit.ptq.runner import ptq_runner
 from model_compression_toolkit.metadata import create_model_metadata
@@ -71,7 +71,7 @@ def keras_post_training_quantization(in_model: Model,
              representative_data_gen (Callable): Dataset used for calibration.
              target_resource_utilization (ResourceUtilization): ResourceUtilization object to limit the search of the mixed-precision configuration as desired.
              core_config (CoreConfig): Configuration object containing parameters of how the model should be quantized, including mixed precision parameters.
-             target_platform_capabilities (TargetPlatformCapabilities): TargetPlatformCapabilities to optimize the Keras model according to.
+             target_platform_capabilities (FrameworkQuantizationCapabilities): FrameworkQuantizationCapabilities to optimize the Keras model according to.
 
          Returns:
 
diff --git a/model_compression_toolkit/ptq/pytorch/quantization_facade.py b/model_compression_toolkit/ptq/pytorch/quantization_facade.py
index 2f71ea619..c3835d1e6 100644
--- a/model_compression_toolkit/ptq/pytorch/quantization_facade.py
+++ b/model_compression_toolkit/ptq/pytorch/quantization_facade.py
@@ -21,7 +21,7 @@
 from model_compression_toolkit.constants import PYTORCH
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel
 from model_compression_toolkit.verify_packages import FOUND_TORCH
-from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization
 from model_compression_toolkit.core import CoreConfig
 from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \
@@ -53,7 +53,7 @@ def pytorch_post_training_quantization(in_module: Module,
         """
         Quantize a trained Pytorch module using post-training quantization.
         By default, the module is quantized using a symmetric constraint quantization thresholds
-        (power of two) as defined in the default TargetPlatformCapabilities.
+        (power of two) as defined in the default FrameworkQuantizationCapabilities.
         The module is first optimized using several transformations (e.g. BatchNormalization folding to
         preceding layers). Then, using a given dataset, statistics (e.g. min/max, histogram, etc.) are
         being collected for each layer's output (and input, depends on the quantization configuration).
@@ -68,7 +68,7 @@ def pytorch_post_training_quantization(in_module: Module,
             representative_data_gen (Callable): Dataset used for calibration.
             target_resource_utilization (ResourceUtilization): ResourceUtilization object to limit the search of the mixed-precision configuration as desired.
             core_config (CoreConfig): Configuration object containing parameters of how the model should be quantized, including mixed precision parameters.
-            target_platform_capabilities (TargetPlatformCapabilities): TargetPlatformCapabilities to optimize the PyTorch model according to.
+            target_platform_capabilities (FrameworkQuantizationCapabilities): FrameworkQuantizationCapabilities to optimize the PyTorch model according to.
 
         Returns:
             A quantized module and information the user may need to handle the quantized module.
diff --git a/model_compression_toolkit/qat/keras/quantization_facade.py b/model_compression_toolkit/qat/keras/quantization_facade.py
index c3494c44f..92f52b0b7 100644
--- a/model_compression_toolkit/qat/keras/quantization_facade.py
+++ b/model_compression_toolkit/qat/keras/quantization_facade.py
@@ -25,7 +25,7 @@
 from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \
     MixedPrecisionQuantizationConfig
 from mct_quantizers import KerasActivationQuantizationHolder
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.runner import core_runner
 from model_compression_toolkit.ptq.runner import ptq_runner
 
@@ -115,7 +115,7 @@ def keras_quantization_aware_training_init_experimental(in_model: Model,
              target_resource_utilization (ResourceUtilization): ResourceUtilization object to limit the search of the mixed-precision configuration as desired.
              core_config (CoreConfig): Configuration object containing parameters of how the model should be quantized, including mixed precision parameters.
              qat_config (QATConfig): QAT configuration
-             target_platform_capabilities (TargetPlatformCapabilities): TargetPlatformCapabilities to optimize the Keras model according to.
+             target_platform_capabilities (FrameworkQuantizationCapabilities): FrameworkQuantizationCapabilities to optimize the Keras model according to.
 
          Returns:
 
diff --git a/model_compression_toolkit/qat/pytorch/quantization_facade.py b/model_compression_toolkit/qat/pytorch/quantization_facade.py
index 7f9d66373..30a2d0344 100644
--- a/model_compression_toolkit/qat/pytorch/quantization_facade.py
+++ b/model_compression_toolkit/qat/pytorch/quantization_facade.py
@@ -31,7 +31,7 @@
 from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \
     MixedPrecisionQuantizationConfig
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import \
-    TargetPlatformCapabilities
+    FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.runner import core_runner
 from model_compression_toolkit.ptq.runner import ptq_runner
 
@@ -104,7 +104,7 @@ def pytorch_quantization_aware_training_init_experimental(in_model: Module,
              target_resource_utilization (ResourceUtilization): ResourceUtilization object to limit the search of the mixed-precision configuration as desired.
              core_config (CoreConfig): Configuration object containing parameters of how the model should be quantized, including mixed precision parameters.
              qat_config (QATConfig): QAT configuration
-             target_platform_capabilities (TargetPlatformCapabilities): TargetPlatformCapabilities to optimize the Pytorch model according to.
+             target_platform_capabilities (FrameworkQuantizationCapabilities): FrameworkQuantizationCapabilities to optimize the Pytorch model according to.
 
          Returns:
 
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py b/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py
index 249cec797..0f9631aca 100644
--- a/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py
+++ b/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py
@@ -14,7 +14,7 @@
 # ==============================================================================
 
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attribute_filter import AttributeFilter
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import TargetPlatformCapabilities, OperationsSetToLayers, Smaller, SmallerEq, NotEq, Eq, GreaterEq, Greater, LayerFilterParams, OperationsToLayers, get_current_tpc
+from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities, OperationsSetToLayers, Smaller, SmallerEq, NotEq, Eq, GreaterEq, Greater, LayerFilterParams, OperationsToLayers, get_current_tpc
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, OperatorsSet, \
     OperatorSetConcat, Signedness, AttributeQuantizationConfig, OpQuantizationConfig, QuantizationConfigOptions, Fusing
 
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py
index 3f52ffa2e..2f1e68bfb 100644
--- a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py
+++ b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py
@@ -14,7 +14,7 @@
 # ==============================================================================
 
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.current_tpc import get_current_tpc
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.target_platform_capabilities import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.framework_quantization_capabilities import FrameworkQuantizationCapabilities
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attribute_filter import \
     Eq, GreaterEq, NotEq, SmallerEq, Greater, Smaller
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.layer_filter_params import \
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attach2fw.py b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attach2fw.py
index 0c752e517..fb3f0af84 100644
--- a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attach2fw.py
+++ b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attach2fw.py
@@ -3,7 +3,7 @@
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, \
     OperatorsSet
-from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities, \
+from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities, \
     OperationsSetToLayers
 
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
@@ -21,7 +21,7 @@ def __init__(self):
 
     def attach(self, tpc_model: TargetPlatformModel,
                custom_opset2layer: Optional[Dict[str, 'CustomOpsetLayers']] = None
-               ) -> TargetPlatformCapabilities:
+               ) -> FrameworkQuantizationCapabilities:
         """
         Attaching a TargetPlatformModel which includes a platform capabilities description to specific
         framework's operators.
@@ -33,11 +33,11 @@ def attach(self, tpc_model: TargetPlatformModel,
                 an operator set unique name to a pair of: a list of framework operators and an optional
                 operator's attributes names mapping.
 
-        Returns: a TargetPlatformCapabilities object.
+        Returns: a FrameworkQuantizationCapabilities object.
 
         """
 
-        tpc = TargetPlatformCapabilities(tpc_model)
+        tpc = FrameworkQuantizationCapabilities(tpc_model)
         custom_opset2layer = custom_opset2layer if custom_opset2layer is not None else {}
 
         with tpc:
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py
index ce6f034dc..e0fcd9560 100644
--- a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py
+++ b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py
@@ -18,7 +18,7 @@
 def get_current_tpc():
     """
 
-    Returns: The current TargetPlatformCapabilities that is being used and accessed.
+    Returns: The current FrameworkQuantizationCapabilities that is being used and accessed.
 
     """
     return _current_tpc.get()
@@ -26,7 +26,7 @@ def get_current_tpc():
 
 class _CurrentTPC(object):
     """
-    Wrapper of the current TargetPlatformCapabilities object that is being accessed and defined.
+    Wrapper of the current FrameworkQuantizationCapabilities object that is being accessed and defined.
     """
     def __init__(self):
         super(_CurrentTPC, self).__init__()
@@ -35,28 +35,28 @@ def __init__(self):
     def get(self):
         """
 
-        Returns: The current TargetPlatformCapabilities that is being defined.
+        Returns: The current FrameworkQuantizationCapabilities that is being defined.
 
         """
         if self.tpc is None:
-            Logger.critical("'TargetPlatformCapabilities' (TPC) instance is not initialized.")
+            Logger.critical("'FrameworkQuantizationCapabilities' (TPC) instance is not initialized.")
         return self.tpc
 
     def reset(self):
         """
 
-        Reset the current TargetPlatformCapabilities so a new TargetPlatformCapabilities can be wrapped and
-        used as the current TargetPlatformCapabilities object.
+        Reset the current FrameworkQuantizationCapabilities so a new FrameworkQuantizationCapabilities can be wrapped and
+        used as the current FrameworkQuantizationCapabilities object.
 
         """
         self.tpc = None
 
     def set(self, target_platform_capabilities):
         """
-        Set and wrap a TargetPlatformCapabilities as the current TargetPlatformCapabilities.
+        Set and wrap a FrameworkQuantizationCapabilities as the current FrameworkQuantizationCapabilities.
 
         Args:
-            target_platform_capabilities: TargetPlatformCapabilities to set as the current TargetPlatformCapabilities
+            target_platform_capabilities: FrameworkQuantizationCapabilities to set as the current FrameworkQuantizationCapabilities
             to access and use.
 
         """
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py
index ea09059e2..bedae4f6d 100644
--- a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py
+++ b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py
@@ -19,12 +19,12 @@
 from model_compression_toolkit.target_platform_capabilities.schema.schema_functions import \
     get_config_options_by_operators_set, is_opset_in_model
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.current_tpc import  _current_tpc
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.target_platform_capabilities_component import TargetPlatformCapabilitiesComponent
+from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.framework_quantization_capabilities_component import FrameworkQuantizationCapabilitiesComponent
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import OperatorsSetBase, OperatorSetConcat
 from model_compression_toolkit import DefaultDict
 
 
-class OperationsSetToLayers(TargetPlatformCapabilitiesComponent):
+class OperationsSetToLayers(FrameworkQuantizationCapabilitiesComponent):
     """
     Associate an OperatorsSet to a list of framework's layers.
     """
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py
index c0170f954..c83e8ed00 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py
@@ -32,7 +32,7 @@ def get_target_platform_capabilities(fw_name: str,
     existing TPC API.
 
     Args:
-        fw_name: Framework name of the TargetPlatformCapabilities.
+        fw_name: Framework name of the FrameworkQuantizationCapabilities.
         target_platform_name: Target platform model name the model will use for inference.
         target_platform_version: Target platform capabilities version.
 
diff --git a/model_compression_toolkit/xquant/common/model_folding_utils.py b/model_compression_toolkit/xquant/common/model_folding_utils.py
index 679854b96..c9e92d228 100644
--- a/model_compression_toolkit/xquant/common/model_folding_utils.py
+++ b/model_compression_toolkit/xquant/common/model_folding_utils.py
@@ -23,7 +23,7 @@
 from typing import Any, Callable
 
 from model_compression_toolkit.core.common import Graph
-from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 
 
 class ModelFoldingUtils:
@@ -35,7 +35,7 @@ class ModelFoldingUtils:
     def __init__(self,
                  fw_info: FrameworkInfo,
                  fw_impl: FrameworkImplementation,
-                 fw_default_tpc: TargetPlatformCapabilities):
+                 fw_default_tpc: FrameworkQuantizationCapabilities):
         """
         Initialize the ModelFoldingUtils class with framework-specific information, implementation details,
         and default TPC.
diff --git a/tests/common_tests/helpers/generate_test_tp_model.py b/tests/common_tests/helpers/generate_test_tp_model.py
index c994e6944..0991ea323 100644
--- a/tests/common_tests/helpers/generate_test_tp_model.py
+++ b/tests/common_tests/helpers/generate_test_tp_model.py
@@ -168,7 +168,7 @@ def generate_custom_test_tp_model(name: str,
 
 def generate_test_tpc(name: str,
                       tp_model: schema.TargetPlatformModel,
-                      base_tpc: tp.TargetPlatformCapabilities,
+                      base_tpc: tp.FrameworkQuantizationCapabilities,
                       op_sets_to_layer_add: Dict[str, List[Any]] = None,
                       op_sets_to_layer_drop: Dict[str, List[Any]] = None,
                       attr_mapping: Dict[str, Dict] = {}):
@@ -189,7 +189,7 @@ def generate_test_tpc(name: str,
         # Remove empty op sets
         merged_dict = {op_set_name: layers for op_set_name, layers in merged_dict.items() if len(layers) == 0}
 
-    tpc = tp.TargetPlatformCapabilities(tp_model)
+    tpc = tp.FrameworkQuantizationCapabilities(tp_model)
 
     with tpc:
         for op_set_name, layers in merged_dict.items():
diff --git a/tests/keras_tests/exporter_tests/tflite_int8/imx500_int8_tp_model.py b/tests/keras_tests/exporter_tests/tflite_int8/imx500_int8_tp_model.py
index 46e612bec..f7969ea51 100644
--- a/tests/keras_tests/exporter_tests/tflite_int8/imx500_int8_tp_model.py
+++ b/tests/keras_tests/exporter_tests/tflite_int8/imx500_int8_tp_model.py
@@ -69,7 +69,7 @@ def get_int8_tpc(edit_weights_params_dict={}, edit_act_params_dict={}) -> tp.Tar
 
 
 def generate_keras_tpc(name: str, tp_model: schema.TargetPlatformModel):
-    keras_tpc = tp.TargetPlatformCapabilities(tp_model)
+    keras_tpc = tp.FrameworkQuantizationCapabilities(tp_model)
 
     with keras_tpc:
         tp.OperationsSetToLayers("NoQuantization", [Reshape,
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/second_moment_correction_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/second_moment_correction_test.py
index 86be4a2d8..aa48c883f 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/second_moment_correction_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/second_moment_correction_test.py
@@ -37,7 +37,7 @@
 from model_compression_toolkit.core.runner import core_runner
 from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
-from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
@@ -268,7 +268,7 @@ def prepare_graph(self,
                       fw_info: FrameworkInfo = DEFAULT_KERAS_INFO,
                       network_editor: List[EditRule] = [],
                       analyze_similarity: bool = False,
-                      target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_KERAS_TPC) -> \
+                      target_platform_capabilities: FrameworkQuantizationCapabilities = DEFAULT_KERAS_TPC) -> \
             Tuple[Graph, Graph]:
 
         KerasModelValidation(model=in_model,
diff --git a/tests/keras_tests/function_tests/test_custom_layer.py b/tests/keras_tests/function_tests/test_custom_layer.py
index e82e89884..c7fa6fba8 100644
--- a/tests/keras_tests/function_tests/test_custom_layer.py
+++ b/tests/keras_tests/function_tests/test_custom_layer.py
@@ -60,10 +60,10 @@ def get_tpc():
     Assuming a target hardware that uses power-of-2 thresholds and quantizes weights and activations
     to 2 and 3 bits, accordingly. Our assumed hardware does not require quantization of some layers
     (e.g. Flatten & Droupout).
-    This function generates a TargetPlatformCapabilities with the above specification.
+    This function generates a FrameworkQuantizationCapabilities with the above specification.
 
     Returns:
-         TargetPlatformCapabilities object
+         FrameworkQuantizationCapabilities object
     """
     tp = mct.target_platform
     attr_cfg = generate_test_attr_configs(kernel_lut_values_bitwidth=0)
diff --git a/tests/keras_tests/non_parallel_tests/test_keras_tp_model.py b/tests/keras_tests/non_parallel_tests/test_keras_tp_model.py
index 3b8b387c2..e4610953a 100644
--- a/tests/keras_tests/non_parallel_tests/test_keras_tp_model.py
+++ b/tests/keras_tests/non_parallel_tests/test_keras_tp_model.py
@@ -36,7 +36,7 @@
 
 import model_compression_toolkit as mct
 from model_compression_toolkit.constants import TENSORFLOW
-from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import \
     LayerFilterParams
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attribute_filter import \
@@ -113,7 +113,7 @@ def test_get_layers_by_op(self):
             tpc_platform_type=None,
             operator_set=tuple([op_obj]),
             add_metadata=False)
-        fw_tp = TargetPlatformCapabilities(hm)
+        fw_tp = FrameworkQuantizationCapabilities(hm)
         with fw_tp:
             opset_layers = [Conv2D, LayerFilterParams(ReLU, max_value=2)]
             tp.OperationsSetToLayers('opsetA', opset_layers)
@@ -133,7 +133,7 @@ def test_get_layers_by_opconcat(self):
             operator_set=tuple([op_obj_a, op_obj_b]),
             add_metadata=False)
 
-        fw_tp = TargetPlatformCapabilities(hm)
+        fw_tp = FrameworkQuantizationCapabilities(hm)
         with fw_tp:
             opset_layers_a = [Conv2D]
             opset_layers_b = [LayerFilterParams(ReLU, max_value=2)]
@@ -153,7 +153,7 @@ def test_layer_attached_to_multiple_opsets(self):
             add_metadata=False)
 
 
-        fw_tp = TargetPlatformCapabilities(hm)
+        fw_tp = FrameworkQuantizationCapabilities(hm)
         with self.assertRaises(Exception) as e:
             with fw_tp:
                 tp.OperationsSetToLayers('opsetA', [Conv2D])
@@ -169,7 +169,7 @@ def test_filter_layer_attached_to_multiple_opsets(self):
             operator_set=tuple([schema.OperatorsSet(name='opsetA'),
                           schema.OperatorsSet(name='opsetB')]),
             add_metadata=False)
-        fw_tp = TargetPlatformCapabilities(hm)
+        fw_tp = FrameworkQuantizationCapabilities(hm)
         with self.assertRaises(Exception) as e:
             with fw_tp:
                 tp.OperationsSetToLayers('opsetA', [LayerFilterParams(Activation, activation="relu")])
@@ -200,7 +200,7 @@ def test_qco_by_keras_layer(self):
                                          add_metadata=False,
                                          name='test')
 
-        tpc_keras = tp.TargetPlatformCapabilities(tpm)
+        tpc_keras = tp.FrameworkQuantizationCapabilities(tpm)
         with tpc_keras:
             tp.OperationsSetToLayers("conv", [Conv2D],
                                      attr_mapping={KERNEL_ATTR: DefaultDict(default_value=KERAS_KERNEL),
@@ -234,7 +234,7 @@ def test_qco_by_keras_layer(self):
     #                                     tpc_platform_type=None,
     #                                     operator_set=tuple([schema.OperatorsSet(name="opA")]),
     #                                     add_metadata=False)
-    #     hm_keras = tp.TargetPlatformCapabilities(hm)
+    #     hm_keras = tp.FrameworkQuantizationCapabilities(hm)
     #     with self.assertRaises(Exception) as e:
     #         with hm_keras:
     #             tp.OperationsSetToLayers("conv", [Conv2D])
@@ -259,7 +259,7 @@ def test_keras_fusing_patterns(self):
                                         fusing_patterns=tuple(fusing_patterns),
                                         add_metadata=False)
 
-        hm_keras = tp.TargetPlatformCapabilities(hm)
+        hm_keras = tp.FrameworkQuantizationCapabilities(hm)
         with hm_keras:
             tp.OperationsSetToLayers("opA", [Conv2D])
             tp.OperationsSetToLayers("opB", [tf.nn.tanh])
@@ -287,7 +287,7 @@ def test_get_default_op_qc(self):
                                          operator_set=tuple([schema.OperatorsSet(name="opA")]),
                                          add_metadata=False)
 
-        tpc = tp.TargetPlatformCapabilities(tpm)
+        tpc = tp.FrameworkQuantizationCapabilities(tpm)
         with tpc:
             tp.OperationsSetToLayers("opA", [Conv2D])
 
diff --git a/tests/pytorch_tests/function_tests/test_pytorch_tp_model.py b/tests/pytorch_tests/function_tests/test_pytorch_tp_model.py
index 5169ae46a..95a3cc090 100644
--- a/tests/pytorch_tests/function_tests/test_pytorch_tp_model.py
+++ b/tests/pytorch_tests/function_tests/test_pytorch_tp_model.py
@@ -28,7 +28,7 @@
 from model_compression_toolkit.defaultdict import DefaultDict
 from model_compression_toolkit.constants import PYTORCH
 from model_compression_toolkit.core.common import BaseNode
-from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import \
     LayerFilterParams
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attribute_filter import \
@@ -113,7 +113,7 @@ def test_qco_by_pytorch_layer(self):
                                          add_metadata=False,
                                          name='test')
 
-        tpc_pytorch = tp.TargetPlatformCapabilities(tpm)
+        tpc_pytorch = tp.FrameworkQuantizationCapabilities(tpm)
         with tpc_pytorch:
             tp.OperationsSetToLayers("conv", [torch.nn.Conv2d],
                                      attr_mapping={KERNEL_ATTR: DefaultDict(default_value=PYTORCH_KERNEL),
@@ -155,7 +155,7 @@ def test_get_layers_by_op(self):
             operator_set=tuple([op_obj]),
             add_metadata=False)
 
-        fw_tp = TargetPlatformCapabilities(hm)
+        fw_tp = FrameworkQuantizationCapabilities(hm)
         with fw_tp:
             opset_layers = [torch.nn.Conv2d, LayerFilterParams(torch.nn.Softmax, dim=1)]
             tp.OperationsSetToLayers('opsetA', opset_layers)
@@ -175,7 +175,7 @@ def test_get_layers_by_opconcat(self):
             operator_set=tuple([op_obj_a, op_obj_b]),
             add_metadata=False)
 
-        fw_tp = TargetPlatformCapabilities(hm)
+        fw_tp = FrameworkQuantizationCapabilities(hm)
         with fw_tp:
             opset_layers_a = [torch.nn.Conv2d]
             opset_layers_b = [LayerFilterParams(torch.nn.Softmax, dim=1)]
@@ -195,7 +195,7 @@ def test_layer_attached_to_multiple_opsets(self):
                 schema.OperatorsSet(name='opsetB')]),
             add_metadata=False)
 
-        fw_tp = TargetPlatformCapabilities(hm)
+        fw_tp = FrameworkQuantizationCapabilities(hm)
         with self.assertRaises(Exception) as e:
             with fw_tp:
                 tp.OperationsSetToLayers('opsetA', [torch.nn.Conv2d])
@@ -212,7 +212,7 @@ def test_filter_layer_attached_to_multiple_opsets(self):
                           schema.OperatorsSet(name='opsetB')]),
             add_metadata=False)
 
-        fw_tp = TargetPlatformCapabilities(hm)
+        fw_tp = FrameworkQuantizationCapabilities(hm)
         with self.assertRaises(Exception) as e:
             with fw_tp:
                 tp.OperationsSetToLayers('opsetA', [LayerFilterParams(torch.nn.Softmax, dim=2)])
@@ -228,7 +228,7 @@ def test_filter_layer_attached_to_multiple_opsets(self):
     #                                     tpc_platform_type=None,
     #                                     operator_set=tuple([schema.OperatorsSet(name="opA")]),
     #                                     add_metadata=False)
-    #     hm_pytorch = tp.TargetPlatformCapabilities(hm)
+    #     hm_pytorch = tp.FrameworkQuantizationCapabilities(hm)
     #     with self.assertRaises(Exception) as e:
     #         with hm_pytorch:
     #             tp.OperationsSetToLayers("conv", [torch.nn.Conv2d])
@@ -253,7 +253,7 @@ def test_pytorch_fusing_patterns(self):
                                         fusing_patterns=tuple(fusing_patterns),
                                         add_metadata=False)
 
-        hm_keras = tp.TargetPlatformCapabilities(hm)
+        hm_keras = tp.FrameworkQuantizationCapabilities(hm)
         with hm_keras:
             tp.OperationsSetToLayers("opA", [torch.conv2d])
             tp.OperationsSetToLayers("opB", [torch.tanh])
diff --git a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_activation_test.py b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_activation_test.py
index 337f7dd8b..e9d9e5718 100644
--- a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_activation_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_activation_test.py
@@ -24,7 +24,7 @@
 from model_compression_toolkit.core.pytorch.reader.node_holders import DummyPlaceHolder
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, PYTORCH_KERNEL, \
     BIAS
-from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities, OperationsSetToLayers
+from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities, OperationsSetToLayers
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, OperatorsSet, \
     QuantizationConfigOptions
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
diff --git a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_weights_test.py b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_weights_test.py
index 1fa5fa58e..2c6202755 100644
--- a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_weights_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_weights_test.py
@@ -23,7 +23,7 @@
 from model_compression_toolkit.core.common.user_info import UserInformation
 from model_compression_toolkit.core.pytorch.constants import BIAS
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, PYTORCH_KERNEL, BIAS_ATTR
-from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities, \
+from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities, \
     OperationsSetToLayers
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, OperatorsSet, \
     QuantizationConfigOptions
diff --git a/tests/pytorch_tests/model_tests/feature_models/multi_head_attention_test.py b/tests/pytorch_tests/model_tests/feature_models/multi_head_attention_test.py
index 503d6751a..5bb7582e9 100644
--- a/tests/pytorch_tests/model_tests/feature_models/multi_head_attention_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/multi_head_attention_test.py
@@ -20,7 +20,7 @@
 
 import model_compression_toolkit as mct
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel
-from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 
diff --git a/tests/pytorch_tests/model_tests/feature_models/second_moment_correction_test.py b/tests/pytorch_tests/model_tests/feature_models/second_moment_correction_test.py
index f3c01b8d7..0d949de35 100644
--- a/tests/pytorch_tests/model_tests/feature_models/second_moment_correction_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/second_moment_correction_test.py
@@ -26,7 +26,7 @@
     quantized_model_builder_for_second_moment_correction
 from model_compression_toolkit.core.common.visualization.tensorboard_writer import init_tensorboard_writer
 from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
-from model_compression_toolkit.target_platform_capabilities.target_platform import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.pytorch.constants import EPSILON_VAL, GAMMA, BETA, MOVING_MEAN, MOVING_VARIANCE
 from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
 from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
@@ -350,7 +350,7 @@ def prepare_graph(self,
                       representative_data_gen: Callable,
                       core_config: CoreConfig = CoreConfig(),
                       fw_info: FrameworkInfo = DEFAULT_PYTORCH_INFO,
-                      target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_PYTORCH_INFO) -> \
+                      target_platform_capabilities: FrameworkQuantizationCapabilities = DEFAULT_PYTORCH_INFO) -> \
             Tuple[Graph, Graph]:
 
         tb_w = init_tensorboard_writer(fw_info)
diff --git a/tutorials/notebooks/mct_features_notebooks/keras/example_keras_activation_threshold_search.ipynb b/tutorials/notebooks/mct_features_notebooks/keras/example_keras_activation_threshold_search.ipynb
index 5cd30e82f..ae28d66f8 100644
--- a/tutorials/notebooks/mct_features_notebooks/keras/example_keras_activation_threshold_search.ipynb
+++ b/tutorials/notebooks/mct_features_notebooks/keras/example_keras_activation_threshold_search.ipynb
@@ -290,7 +290,7 @@
    "source": [
     "import model_compression_toolkit as mct\n",
     "\n",
-    "# Get a TargetPlatformCapabilities object that models the hardware for the quantized model inference. Here, for example, we use the default platform that is attached to a Keras layers representation.\n",
+    "# Get a FrameworkQuantizationCapabilities object that models the hardware for the quantized model inference. Here, for example, we use the default platform that is attached to a Keras layers representation.\n",
     "target_platform_cap = mct.get_target_platform_capabilities('tensorflow', 'default')"
    ],
    "metadata": {
diff --git a/tutorials/notebooks/mct_features_notebooks/keras/example_keras_activation_z_score_threshold.ipynb b/tutorials/notebooks/mct_features_notebooks/keras/example_keras_activation_z_score_threshold.ipynb
index b1a67b353..fd3cc7d47 100644
--- a/tutorials/notebooks/mct_features_notebooks/keras/example_keras_activation_z_score_threshold.ipynb
+++ b/tutorials/notebooks/mct_features_notebooks/keras/example_keras_activation_z_score_threshold.ipynb
@@ -274,7 +274,7 @@
    "source": [
     "import model_compression_toolkit as mct\n",
     "\n",
-    "# Get a TargetPlatformCapabilities object that models the hardware for the quantized model inference. Here, for example, we use the default platform that is attached to a Keras layers representation.\n",
+    "# Get a FrameworkQuantizationCapabilities object that models the hardware for the quantized model inference. Here, for example, we use the default platform that is attached to a Keras layers representation.\n",
     "target_platform_cap = mct.get_target_platform_capabilities('tensorflow', 'default')"
    ],
    "metadata": {
diff --git a/tutorials/notebooks/mct_features_notebooks/keras/example_keras_mobilenet_mixed_precision.ipynb b/tutorials/notebooks/mct_features_notebooks/keras/example_keras_mobilenet_mixed_precision.ipynb
index ff5b43c84..dff46ce45 100644
--- a/tutorials/notebooks/mct_features_notebooks/keras/example_keras_mobilenet_mixed_precision.ipynb
+++ b/tutorials/notebooks/mct_features_notebooks/keras/example_keras_mobilenet_mixed_precision.ipynb
@@ -255,7 +255,7 @@
    "execution_count": null,
    "outputs": [],
    "source": [
-    "# Get a TargetPlatformCapabilities object that models the hardware platform for the quantized model inference. Here, for example, we use the default platform that is attached to a Keras layers' representation.\n",
+    "# Get a FrameworkQuantizationCapabilities object that models the hardware platform for the quantized model inference. Here, for example, we use the default platform that is attached to a Keras layers' representation.\n",
     "target_platform_cap  = mct.get_target_platform_capabilities(\"tensorflow\", 'imx500', target_platform_version='v1')"
    ],
    "metadata": {
diff --git a/tutorials/notebooks/mct_features_notebooks/keras/example_keras_post-training_quantization.ipynb b/tutorials/notebooks/mct_features_notebooks/keras/example_keras_post-training_quantization.ipynb
index 65ff4f895..6ebf07f72 100644
--- a/tutorials/notebooks/mct_features_notebooks/keras/example_keras_post-training_quantization.ipynb
+++ b/tutorials/notebooks/mct_features_notebooks/keras/example_keras_post-training_quantization.ipynb
@@ -251,7 +251,7 @@
    "source": [
     "import model_compression_toolkit as mct\n",
     "\n",
-    "# Get a TargetPlatformCapabilities object that models the hardware for the quantized model inference. Here, for example, we use the default platform that is attached to a Keras layers representation.\n",
+    "# Get a FrameworkQuantizationCapabilities object that models the hardware for the quantized model inference. Here, for example, we use the default platform that is attached to a Keras layers representation.\n",
     "target_platform_cap = mct.get_target_platform_capabilities('tensorflow', 'default')"
    ],
    "metadata": {
diff --git a/tutorials/notebooks/mct_features_notebooks/keras/example_keras_pruning_mnist.ipynb b/tutorials/notebooks/mct_features_notebooks/keras/example_keras_pruning_mnist.ipynb
index 92298e512..e53a55365 100644
--- a/tutorials/notebooks/mct_features_notebooks/keras/example_keras_pruning_mnist.ipynb
+++ b/tutorials/notebooks/mct_features_notebooks/keras/example_keras_pruning_mnist.ipynb
@@ -251,7 +251,7 @@
     "                                      tpc_platform_type=\"custom_pruning_notebook_tpc\")\n",
     "\n",
     "    # Return the target platform capabilities\n",
-    "    tpc = tp.TargetPlatformCapabilities(tp_model)\n",
+    "    tpc = tp.FrameworkQuantizationCapabilities(tp_model)\n",
     "    return tpc\n"
    ],
    "metadata": {
diff --git a/tutorials/notebooks/mct_features_notebooks/keras/example_keras_qat.ipynb b/tutorials/notebooks/mct_features_notebooks/keras/example_keras_qat.ipynb
index e49fc91de..81f622371 100644
--- a/tutorials/notebooks/mct_features_notebooks/keras/example_keras_qat.ipynb
+++ b/tutorials/notebooks/mct_features_notebooks/keras/example_keras_qat.ipynb
@@ -203,10 +203,10 @@
     "    a symmetric threshold for the weights. The activations are quantized to 3 bits, and the kernel weights\n",
     "    are quantized to 2 bits. Our assumed hardware does not require quantization of some layers\n",
     "    (e.g. Flatten & Droupout).\n",
-    "    This function generates a TargetPlatformCapabilities with the above specification.\n",
+    "    This function generates a FrameworkQuantizationCapabilities with the above specification.\n",
     "\n",
     "    Returns:\n",
-    "         TargetPlatformCapabilities object\n",
+    "         FrameworkQuantizationCapabilities object\n",
     "    \"\"\"\n",
     "\n",
     "    # define a default quantization config for all non-specified weights attributes.\n",
@@ -267,7 +267,7 @@
     "        # Group of linear OperatorsSets such as convolution and matmul.\n",
     "        tp.OperatorsSet(\"LinearOp\")\n",
     "\n",
-    "    tpc = tp.TargetPlatformCapabilities(tp_model)\n",
+    "    tpc = tp.FrameworkQuantizationCapabilities(tp_model)\n",
     "    with tpc:\n",
     "        # No need to quantize Flatten and Dropout layers\n",
     "        tp.OperationsSetToLayers(\"NoQuantization\", [layers.Flatten, layers.Dropout])\n",
diff --git a/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mixed_precision_ptq.ipynb b/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mixed_precision_ptq.ipynb
index f57e5cbdd..e5d17edf2 100644
--- a/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mixed_precision_ptq.ipynb
+++ b/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_mixed_precision_ptq.ipynb
@@ -195,7 +195,7 @@
    "source": [
     "import model_compression_toolkit as mct\n",
     "\n",
-    "# Get a TargetPlatformCapabilities object that models the hardware platform for the quantized model inference. Here, for example, we use the default platform that is attached to a Pytorch layers representation.\n",
+    "# Get a FrameworkQuantizationCapabilities object that models the hardware platform for the quantized model inference. Here, for example, we use the default platform that is attached to a Pytorch layers representation.\n",
     "target_platform_cap = mct.get_target_platform_capabilities('pytorch', 'default')"
    ],
    "metadata": {
diff --git a/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_post_training_quantization.ipynb b/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_post_training_quantization.ipynb
index cf89cf0fb..5df268d1f 100644
--- a/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_post_training_quantization.ipynb
+++ b/tutorials/notebooks/mct_features_notebooks/pytorch/example_pytorch_post_training_quantization.ipynb
@@ -194,7 +194,7 @@
    "source": [
     "import model_compression_toolkit as mct\n",
     "\n",
-    "# Get a TargetPlatformCapabilities object that models the hardware platform for the quantized model inference. Here, for example, we use the default platform that is attached to a Pytorch layers representation.\n",
+    "# Get a FrameworkQuantizationCapabilities object that models the hardware platform for the quantized model inference. Here, for example, we use the default platform that is attached to a Pytorch layers representation.\n",
     "target_platform_cap = mct.get_target_platform_capabilities('pytorch', 'default')"
    ],
    "metadata": {

From bdd202bad3b663c127b73f5f63c620337ade3e15 Mon Sep 17 00:00:00 2001
From: liord <lior.dikstein@altair-semi.com>
Date: Tue, 7 Jan 2025 12:38:35 +0200
Subject: [PATCH 02/18] Rename
 TargetPlatformCapabilities-->FrameworkQuantizationCapabilities

---
 ...=> framework_quantization_capabilities.py} | 28 +++++++++----------
 ...rk_quantization_capabilities_component.py} |  2 +-
 2 files changed, 15 insertions(+), 15 deletions(-)
 rename model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/{target_platform_capabilities.py => framework_quantization_capabilities.py} (87%)
 rename model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/{target_platform_capabilities_component.py => framework_quantization_capabilities_component.py} (95%)

diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/framework_quantization_capabilities.py
similarity index 87%
rename from model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py
rename to model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/framework_quantization_capabilities.py
index 43574a83e..07a400836 100644
--- a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities.py
+++ b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/framework_quantization_capabilities.py
@@ -23,7 +23,7 @@
     get_config_options_by_operators_set, get_default_op_quantization_config, get_opset_by_name
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.operations_to_layers import \
     OperationsToLayers, OperationsSetToLayers
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.target_platform_capabilities_component import TargetPlatformCapabilitiesComponent
+from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.framework_quantization_capabilities_component import FrameworkQuantizationCapabilitiesComponent
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.layer_filter_params import LayerFilterParams
 from model_compression_toolkit.target_platform_capabilities.immutable import ImmutableClass
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, OperatorsSetBase, \
@@ -31,7 +31,7 @@
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.current_tpc import _current_tpc
 
 
-class TargetPlatformCapabilities(ImmutableClass):
+class FrameworkQuantizationCapabilities(ImmutableClass):
     """
     Attach framework information to a modeled hardware.
     """
@@ -42,12 +42,12 @@ def __init__(self,
 
         Args:
             tp_model (TargetPlatformModel): Modeled hardware to attach framework information to.
-            name (str): Name of the TargetPlatformCapabilities.
+            name (str): Name of the FrameworkQuantizationCapabilities.
         """
 
         super().__init__()
         self.name = name
-        assert isinstance(tp_model, TargetPlatformModel), f'Target platform model that was passed to TargetPlatformCapabilities must be of type TargetPlatformModel, but has type of {type(tp_model)}'
+        assert isinstance(tp_model, TargetPlatformModel), f'Target platform model that was passed to FrameworkQuantizationCapabilities must be of type TargetPlatformModel, but has type of {type(tp_model)}'
         self.tp_model = tp_model
         self.op_sets_to_layers = OperationsToLayers() # Init an empty OperationsToLayers
         self.layer2qco, self.filterlayer2qco = {}, {} # Init empty mappings from layers/LayerFilterParams to QC options
@@ -68,7 +68,7 @@ def get_layers_by_opset_name(self, opset_name: str) -> List[Any]:
         """
         opset = get_opset_by_name(self.tp_model, opset_name)
         if opset is None:
-            Logger.warning(f'{opset_name} was not found in TargetPlatformCapabilities.')
+            Logger.warning(f'{opset_name} was not found in FrameworkQuantizationCapabilities.')
             return None
         return self.get_layers_by_opset(opset)
 
@@ -111,7 +111,7 @@ def get_fusing_patterns(self) -> List[List[Any]]:
     def get_info(self) -> Dict[str, Any]:
         """
 
-        Returns: Summarization of information in the TargetPlatformCapabilities.
+        Returns: Summarization of information in the FrameworkQuantizationCapabilities.
 
         """
         return {"Target Platform Capabilities": self.name,
@@ -124,34 +124,34 @@ def get_info(self) -> Dict[str, Any]:
     def show(self):
         """
 
-        Display the TargetPlatformCapabilities.
+        Display the FrameworkQuantizationCapabilities.
 
         """
         pprint.pprint(self.get_info(), sort_dicts=False, width=110)
 
-    def append_component(self, tpc_component: TargetPlatformCapabilitiesComponent):
+    def append_component(self, tpc_component: FrameworkQuantizationCapabilitiesComponent):
         """
-        Append a Component (like OperationsSetToLayers) to the TargetPlatformCapabilities.
+        Append a Component (like OperationsSetToLayers) to the FrameworkQuantizationCapabilities.
 
         Args:
-            tpc_component: Component to append to TargetPlatformCapabilities.
+            tpc_component: Component to append to FrameworkQuantizationCapabilities.
 
         """
         if isinstance(tpc_component, OperationsSetToLayers):
             self.op_sets_to_layers += tpc_component
         else:
-            Logger.critical(f"Attempt to append an unrecognized 'TargetPlatformCapabilitiesComponent' of type: '{type(tpc_component)}'. Ensure the component is compatible.")  # pragma: no cover
+            Logger.critical(f"Attempt to append an unrecognized 'FrameworkQuantizationCapabilitiesComponent' of type: '{type(tpc_component)}'. Ensure the component is compatible.")  # pragma: no cover
 
     def __enter__(self):
         """
-        Init a TargetPlatformCapabilities object.
+        Init a FrameworkQuantizationCapabilities object.
         """
         _current_tpc.set(self)
         return self
 
     def __exit__(self, exc_type, exc_value, tb):
         """
-        Finalize a TargetPlatformCapabilities object.
+        Finalize a FrameworkQuantizationCapabilities object.
         """
         if exc_value is not None:
             print(exc_value, exc_value.args)
@@ -165,7 +165,7 @@ def get_default_op_qc(self) -> OpQuantizationConfig:
         """
 
         Returns: The default OpQuantizationConfig of the TargetPlatformModel that is attached
-        to the TargetPlatformCapabilities.
+        to the FrameworkQuantizationCapabilities.
 
         """
         return get_default_op_quantization_config(self.tp_model)
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities_component.py b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/framework_quantization_capabilities_component.py
similarity index 95%
rename from model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities_component.py
rename to model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/framework_quantization_capabilities_component.py
index e352abaf0..6341a0e69 100644
--- a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/target_platform_capabilities_component.py
+++ b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/framework_quantization_capabilities_component.py
@@ -16,7 +16,7 @@
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.current_tpc import  _current_tpc
 
 
-class TargetPlatformCapabilitiesComponent:
+class FrameworkQuantizationCapabilitiesComponent:
     def __init__(self, name: str):
         self.name = name
         _current_tpc.get().append_component(self)

From 0a59dff8df56816fd4b3f990170b61321b466778 Mon Sep 17 00:00:00 2001
From: liord <lior.dikstein@altair-semi.com>
Date: Tue, 7 Jan 2025 14:26:42 +0200
Subject: [PATCH 03/18] Rename OperatorSetConcat-->OperatorSetGroup

---
 .../api/api_docs/modules/target_platform.rst       |  4 ++--
 .../schema/mct_current_schema.py                   |  2 +-
 .../target_platform_capabilities/schema/v1.py      | 12 ++++++------
 .../target_platform/__init__.py                    |  2 +-
 .../operations_to_layers.py                        |  4 ++--
 .../tpc_models/imx500_tpc/v1/tp_model.py           | 12 ++++++------
 .../tpc_models/qnnpack_tpc/v1/tp_model.py          |  4 ++--
 .../tpc_models/tflite_tpc/v1/tp_model.py           |  4 ++--
 .../helpers/tpcs_for_tests/v1/tp_model.py          | 12 ++++++------
 .../helpers/tpcs_for_tests/v1_lut/tp_model.py      | 12 ++++++------
 .../helpers/tpcs_for_tests/v1_pot/tp_model.py      | 12 ++++++------
 .../helpers/tpcs_for_tests/v2/tp_model.py          | 12 ++++++------
 .../helpers/tpcs_for_tests/v2_lut/tp_model.py      | 12 ++++++------
 .../helpers/tpcs_for_tests/v3/tp_model.py          | 12 ++++++------
 .../helpers/tpcs_for_tests/v3_lut/tp_model.py      | 12 ++++++------
 .../helpers/tpcs_for_tests/v4/tp_model.py          | 14 +++++++-------
 tests/common_tests/test_tp_model.py                |  4 ++--
 .../function_tests/test_layer_fusing.py            |  4 ++--
 .../non_parallel_tests/test_keras_tp_model.py      |  2 +-
 .../function_tests/layer_fusing_test.py            |  2 +-
 .../function_tests/test_pytorch_tp_model.py        |  2 +-
 21 files changed, 78 insertions(+), 78 deletions(-)

diff --git a/docsrc/source/api/api_docs/modules/target_platform.rst b/docsrc/source/api/api_docs/modules/target_platform.rst
index 37733ec66..c60611dfa 100644
--- a/docsrc/source/api/api_docs/modules/target_platform.rst
+++ b/docsrc/source/api/api_docs/modules/target_platform.rst
@@ -73,9 +73,9 @@ Fusing
 
 
 
-OperatorSetConcat
+OperatorSetGroup
 ====================
-.. autoclass:: model_compression_toolkit.target_platform.OperatorSetConcat
+.. autoclass:: model_compression_toolkit.target_platform.OperatorSetGroup
 
 
 OperationsToLayers
diff --git a/model_compression_toolkit/target_platform_capabilities/schema/mct_current_schema.py b/model_compression_toolkit/target_platform_capabilities/schema/mct_current_schema.py
index c751049fe..4bc78dc91 100644
--- a/model_compression_toolkit/target_platform_capabilities/schema/mct_current_schema.py
+++ b/model_compression_toolkit/target_platform_capabilities/schema/mct_current_schema.py
@@ -7,6 +7,6 @@
 QuantizationConfigOptions = schema.QuantizationConfigOptions
 OperatorsSetBase = schema.OperatorsSetBase
 OperatorsSet = schema.OperatorsSet
-OperatorSetConcat = schema.OperatorSetConcat
+OperatorSetGroup = schema.OperatorSetGroup
 Fusing = schema.Fusing
 TargetPlatformModel = schema.TargetPlatformModel
diff --git a/model_compression_toolkit/target_platform_capabilities/schema/v1.py b/model_compression_toolkit/target_platform_capabilities/schema/v1.py
index d4df977d9..5a3ae093b 100644
--- a/model_compression_toolkit/target_platform_capabilities/schema/v1.py
+++ b/model_compression_toolkit/target_platform_capabilities/schema/v1.py
@@ -457,7 +457,7 @@ def get_info(self) -> Dict[str, Any]:
         return {"name": self.name}
 
 
-class OperatorSetConcat(OperatorsSetBase):
+class OperatorSetGroup(OperatorsSetBase):
     """
     Concatenate a tuple of operator sets to treat them similarly in different places (like fusing).
 
@@ -469,7 +469,7 @@ class OperatorSetConcat(OperatorsSetBase):
     name: Optional[str] = None  # Will be set in the validator if not given
 
     # Define a private attribute _type
-    type: Literal["OperatorSetConcat"] = "OperatorSetConcat"
+    type: Literal["OperatorSetGroup"] = "OperatorSetGroup"
 
     class Config:
         frozen = True
@@ -518,11 +518,11 @@ class Fusing(TargetPlatformModelComponent):
     hence no quantization is applied between them.
 
     Attributes:
-        operator_groups (Tuple[Union[OperatorsSet, OperatorSetConcat], ...]): A tuple of operator groups,
-                                                                              each being either an OperatorSetConcat or an OperatorsSet.
+        operator_groups (Tuple[Union[OperatorsSet, OperatorSetGroup], ...]): A tuple of operator groups,
+                                                                              each being either an OperatorSetGroup or an OperatorsSet.
         name (Optional[str]): The name for the Fusing instance. If not provided, it is generated from the operator groups' names.
     """
-    operator_groups: Tuple[Annotated[Union[OperatorsSet, OperatorSetConcat], Field(discriminator='type')], ...]
+    operator_groups: Tuple[Annotated[Union[OperatorsSet, OperatorSetGroup], Field(discriminator='type')], ...]
     name: Optional[str] = None  # Will be set in the validator if not given.
 
     class Config:
@@ -591,7 +591,7 @@ def contains(self, other: Any) -> bool:
         for i in range(len(self.operator_groups) - len(other.operator_groups) + 1):
             for j in range(len(other.operator_groups)):
                 if self.operator_groups[i + j] != other.operator_groups[j] and not (
-                        isinstance(self.operator_groups[i + j], OperatorSetConcat) and (
+                        isinstance(self.operator_groups[i + j], OperatorSetGroup) and (
                         other.operator_groups[j] in self.operator_groups[i + j].operators_set)):
                     break
             else:
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py b/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py
index 0f9631aca..006de8bca 100644
--- a/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py
+++ b/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py
@@ -16,7 +16,7 @@
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attribute_filter import AttributeFilter
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities, OperationsSetToLayers, Smaller, SmallerEq, NotEq, Eq, GreaterEq, Greater, LayerFilterParams, OperationsToLayers, get_current_tpc
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, OperatorsSet, \
-    OperatorSetConcat, Signedness, AttributeQuantizationConfig, OpQuantizationConfig, QuantizationConfigOptions, Fusing
+    OperatorSetGroup, Signedness, AttributeQuantizationConfig, OpQuantizationConfig, QuantizationConfigOptions, Fusing
 
 from mct_quantizers import QuantizationMethod
 
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py
index bedae4f6d..b80dcff0b 100644
--- a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py
+++ b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py
@@ -20,7 +20,7 @@
     get_config_options_by_operators_set, is_opset_in_model
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.current_tpc import  _current_tpc
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.framework_quantization_capabilities_component import FrameworkQuantizationCapabilitiesComponent
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import OperatorsSetBase, OperatorSetConcat
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import OperatorsSetBase, OperatorSetGroup
 from model_compression_toolkit import DefaultDict
 
 
@@ -88,7 +88,7 @@ def get_layers_by_op(self,
         for o in self.op_sets_to_layers:
             if op.name == o.name:
                 return o.layers
-        if isinstance(op, OperatorSetConcat):  # If its a concat - return all layers from all OperatorsSets that in the OperatorSetConcat
+        if isinstance(op, OperatorSetGroup):  # If its a concat - return all layers from all OperatorsSets that in the OperatorSetGroup
             layers = []
             for o in op.operators_set:
                 layers.extend(self.get_layers_by_op(o))
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py
index 238e5dd93..2bd8f6c1c 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py
@@ -218,14 +218,14 @@ def generate_tp_model(default_config: OpQuantizationConfig,
 
     operator_set.extend([conv, conv_transpose, depthwise_conv, fc, relu, relu6, leaky_relu, add, sub, mul, div, prelu, swish,
                          hard_swish, sigmoid, tanh, hard_tanh])
-    any_relu = schema.OperatorSetConcat(operators_set=[relu, relu6, leaky_relu, hard_tanh])
+    any_relu = schema.OperatorSetGroup(operators_set=[relu, relu6, leaky_relu, hard_tanh])
     # Combine multiple operators into a single operator to avoid quantization between
     # them. To do this we define fusing patterns using the OperatorsSets that were created.
-    # To group multiple sets with regard to fusing, an OperatorSetConcat can be created
-    activations_after_conv_to_fuse = schema.OperatorSetConcat(operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, hard_swish, prelu, sigmoid, tanh])
-    conv_types = schema.OperatorSetConcat(operators_set=[conv, conv_transpose, depthwise_conv])
-    activations_after_fc_to_fuse = schema.OperatorSetConcat(operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, hard_swish, sigmoid])
-    any_binary = schema.OperatorSetConcat(operators_set=[add, sub, mul, div])
+    # To group multiple sets with regard to fusing, an OperatorSetGroup can be created
+    activations_after_conv_to_fuse = schema.OperatorSetGroup(operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, hard_swish, prelu, sigmoid, tanh])
+    conv_types = schema.OperatorSetGroup(operators_set=[conv, conv_transpose, depthwise_conv])
+    activations_after_fc_to_fuse = schema.OperatorSetGroup(operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, hard_swish, sigmoid])
+    any_binary = schema.OperatorSetGroup(operators_set=[add, sub, mul, div])
 
     # ------------------- #
     # Fusions
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py
index 6ac1bc040..e500d2977 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py
@@ -160,8 +160,8 @@ def generate_tp_model(default_config: OpQuantizationConfig,
 
     operator_set.extend([conv, conv_depthwise, conv_transpose, batchnorm, relu, relu6, hard_tanh, linear])
 
-    conv_opset_concat = schema.OperatorSetConcat(operators_set=[conv, conv_transpose])
-    relu_opset_concat = schema.OperatorSetConcat(operators_set=[relu, relu6, hard_tanh])
+    conv_opset_concat = schema.OperatorSetGroup(operators_set=[conv, conv_transpose])
+    relu_opset_concat = schema.OperatorSetGroup(operators_set=[relu, relu6, hard_tanh])
 
     # ------------------- #
     # Fusions
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py
index 965695dc3..720781739 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py
@@ -193,8 +193,8 @@ def generate_tp_model(default_config: OpQuantizationConfig,
     add = schema.OperatorsSet(name=schema.OperatorSetNames.ADD)
     bias_add = schema.OperatorsSet(name=schema.OperatorSetNames.ADD_BIAS)
 
-    kernel = schema.OperatorSetConcat(operators_set=[conv2d, fc])
-    activations_to_fuse = schema.OperatorSetConcat(operators_set=[relu, elu])
+    kernel = schema.OperatorSetGroup(operators_set=[conv2d, fc])
+    activations_to_fuse = schema.OperatorSetGroup(operators_set=[relu, elu])
 
     operator_set.extend([fc, conv2d, relu, relu6, tanh, sigmoid, batch_norm, add, bias_add, elu, squeeze])
 
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v1/tp_model.py b/tests/common_tests/helpers/tpcs_for_tests/v1/tp_model.py
index fb9b3ff7d..f3ee0b2c2 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v1/tp_model.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v1/tp_model.py
@@ -219,14 +219,14 @@ def generate_tp_model(default_config: OpQuantizationConfig,
 
     operator_set.extend([conv, conv_transpose, depthwise_conv, fc, relu, relu6, leaky_relu, add, sub, mul, div, prelu, swish,
                          hard_swish, sigmoid, tanh, hard_tanh])
-    any_relu = schema.OperatorSetConcat(operators_set=[relu, relu6, leaky_relu, hard_tanh])
+    any_relu = schema.OperatorSetGroup(operators_set=[relu, relu6, leaky_relu, hard_tanh])
     # Combine multiple operators into a single operator to avoid quantization between
     # them. To do this we define fusing patterns using the OperatorsSets that were created.
-    # To group multiple sets with regard to fusing, an OperatorSetConcat can be created
-    activations_after_conv_to_fuse = schema.OperatorSetConcat(operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, hard_swish, prelu, sigmoid, tanh])
-    conv_types = schema.OperatorSetConcat(operators_set=[conv, conv_transpose, depthwise_conv])
-    activations_after_fc_to_fuse = schema.OperatorSetConcat(operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, hard_swish, sigmoid])
-    any_binary = schema.OperatorSetConcat(operators_set=[add, sub, mul, div])
+    # To group multiple sets with regard to fusing, an OperatorSetGroup can be created
+    activations_after_conv_to_fuse = schema.OperatorSetGroup(operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, hard_swish, prelu, sigmoid, tanh])
+    conv_types = schema.OperatorSetGroup(operators_set=[conv, conv_transpose, depthwise_conv])
+    activations_after_fc_to_fuse = schema.OperatorSetGroup(operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, hard_swish, sigmoid])
+    any_binary = schema.OperatorSetGroup(operators_set=[add, sub, mul, div])
 
     # ------------------- #
     # Fusions
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tp_model.py b/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tp_model.py
index 14885e841..14d033d9d 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tp_model.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tp_model.py
@@ -249,16 +249,16 @@ def generate_tp_model(default_config: OpQuantizationConfig,
         [conv, conv_transpose, depthwise_conv, fc, relu, relu6, leaky_relu, add, sub, mul, div, prelu, swish,
          hard_swish, sigmoid,
          tanh, hard_tanh])
-    any_relu = schema.OperatorSetConcat(operators_set=[relu, relu6, leaky_relu, hard_tanh])
+    any_relu = schema.OperatorSetGroup(operators_set=[relu, relu6, leaky_relu, hard_tanh])
     # Combine multiple operators into a single operator to avoid quantization between
     # them. To do this we define fusing patterns using the OperatorsSets that were created.
-    # To group multiple sets with regard to fusing, an OperatorSetConcat can be created
-    activations_after_conv_to_fuse = schema.OperatorSetConcat(
+    # To group multiple sets with regard to fusing, an OperatorSetGroup can be created
+    activations_after_conv_to_fuse = schema.OperatorSetGroup(
         operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, hard_swish, prelu, sigmoid, tanh])
-    conv_types = schema.OperatorSetConcat(operators_set=[conv, conv_transpose, depthwise_conv])
-    activations_after_fc_to_fuse = schema.OperatorSetConcat(
+    conv_types = schema.OperatorSetGroup(operators_set=[conv, conv_transpose, depthwise_conv])
+    activations_after_fc_to_fuse = schema.OperatorSetGroup(
         operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, hard_swish, sigmoid])
-    any_binary = schema.OperatorSetConcat(operators_set=[add, sub, mul, div])
+    any_binary = schema.OperatorSetGroup(operators_set=[add, sub, mul, div])
 
     # ------------------- #
     # Fusions
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tp_model.py b/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tp_model.py
index c463f0d0f..0dbd50dd3 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tp_model.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tp_model.py
@@ -245,16 +245,16 @@ def generate_tp_model(default_config: OpQuantizationConfig,
         [conv, conv_transpose, depthwise_conv, fc, relu, relu6, leaky_relu, add, sub, mul, div, prelu, swish,
          hard_swish, sigmoid,
          tanh, hard_tanh])
-    any_relu = schema.OperatorSetConcat(operators_set=[relu, relu6, leaky_relu, hard_tanh])
+    any_relu = schema.OperatorSetGroup(operators_set=[relu, relu6, leaky_relu, hard_tanh])
     # Combine multiple operators into a single operator to avoid quantization between
     # them. To do this we define fusing patterns using the OperatorsSets that were created.
-    # To group multiple sets with regard to fusing, an OperatorSetConcat can be created
-    activations_after_conv_to_fuse = schema.OperatorSetConcat(
+    # To group multiple sets with regard to fusing, an OperatorSetGroup can be created
+    activations_after_conv_to_fuse = schema.OperatorSetGroup(
         operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, hard_swish, prelu, sigmoid, tanh])
-    conv_types = schema.OperatorSetConcat(operators_set=[conv, conv_transpose, depthwise_conv])
-    activations_after_fc_to_fuse = schema.OperatorSetConcat(
+    conv_types = schema.OperatorSetGroup(operators_set=[conv, conv_transpose, depthwise_conv])
+    activations_after_fc_to_fuse = schema.OperatorSetGroup(
         operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, hard_swish, sigmoid])
-    any_binary = schema.OperatorSetConcat(operators_set=[add, sub, mul, div])
+    any_binary = schema.OperatorSetGroup(operators_set=[add, sub, mul, div])
 
     # ------------------- #
     # Fusions
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v2/tp_model.py b/tests/common_tests/helpers/tpcs_for_tests/v2/tp_model.py
index 16c6c2381..5f8d7f85a 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v2/tp_model.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v2/tp_model.py
@@ -229,15 +229,15 @@ def generate_tp_model(default_config: OpQuantizationConfig,
     operator_set.extend(
         [conv, conv_transpose, depthwise_conv, fc, relu, relu6, leaky_relu, add, sub, mul, div, prelu, swish, hard_swish, sigmoid,
          tanh, hard_tanh])
-    any_relu = schema.OperatorSetConcat(operators_set=[relu, relu6, leaky_relu, hard_tanh])
+    any_relu = schema.OperatorSetGroup(operators_set=[relu, relu6, leaky_relu, hard_tanh])
     # Combine multiple operators into a single operator to avoid quantization between
     # them. To do this we define fusing patterns using the OperatorsSets that were created.
-    # To group multiple sets with regard to fusing, an OperatorSetConcat can be created
-    activations_after_conv_to_fuse = schema.OperatorSetConcat(
+    # To group multiple sets with regard to fusing, an OperatorSetGroup can be created
+    activations_after_conv_to_fuse = schema.OperatorSetGroup(
         operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, hard_swish, prelu, sigmoid, tanh])
-    conv_types = schema.OperatorSetConcat(operators_set=[conv, conv_transpose, depthwise_conv])
-    activations_after_fc_to_fuse = schema.OperatorSetConcat(operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, hard_swish, sigmoid])
-    any_binary = schema.OperatorSetConcat(operators_set=[add, sub, mul, div])
+    conv_types = schema.OperatorSetGroup(operators_set=[conv, conv_transpose, depthwise_conv])
+    activations_after_fc_to_fuse = schema.OperatorSetGroup(operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, hard_swish, sigmoid])
+    any_binary = schema.OperatorSetGroup(operators_set=[add, sub, mul, div])
 
     # ------------------- #
     # Fusions
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tp_model.py b/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tp_model.py
index 4aa691d46..f6a666a20 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tp_model.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tp_model.py
@@ -251,16 +251,16 @@ def generate_tp_model(default_config: OpQuantizationConfig,
         [conv, conv_transpose, depthwise_conv, fc, relu, relu6, leaky_relu, add, sub, mul, div, prelu, swish,
          hard_swish, sigmoid,
          tanh, hard_tanh])
-    any_relu = schema.OperatorSetConcat(operators_set=[relu, relu6, leaky_relu, hard_tanh])
+    any_relu = schema.OperatorSetGroup(operators_set=[relu, relu6, leaky_relu, hard_tanh])
     # Combine multiple operators into a single operator to avoid quantization between
     # them. To do this we define fusing patterns using the OperatorsSets that were created.
-    # To group multiple sets with regard to fusing, an OperatorSetConcat can be created
-    activations_after_conv_to_fuse = schema.OperatorSetConcat(
+    # To group multiple sets with regard to fusing, an OperatorSetGroup can be created
+    activations_after_conv_to_fuse = schema.OperatorSetGroup(
         operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, hard_swish, prelu, sigmoid, tanh])
-    conv_types = schema.OperatorSetConcat(operators_set=[conv, conv_transpose, depthwise_conv])
-    activations_after_fc_to_fuse = schema.OperatorSetConcat(
+    conv_types = schema.OperatorSetGroup(operators_set=[conv, conv_transpose, depthwise_conv])
+    activations_after_fc_to_fuse = schema.OperatorSetGroup(
         operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, hard_swish, sigmoid])
-    any_binary = schema.OperatorSetConcat(operators_set=[add, sub, mul, div])
+    any_binary = schema.OperatorSetGroup(operators_set=[add, sub, mul, div])
 
     # ------------------- #
     # Fusions
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v3/tp_model.py b/tests/common_tests/helpers/tpcs_for_tests/v3/tp_model.py
index 5f2932d98..0c30b8b34 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v3/tp_model.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v3/tp_model.py
@@ -259,15 +259,15 @@ def generate_tp_model(default_config: OpQuantizationConfig,
     operator_set.extend(
         [conv, conv_transpose, depthwise_conv, fc, relu, relu6, leaky_relu, add, sub, mul, div, prelu, swish, hard_swish, sigmoid,
          tanh, hard_tanh])
-    any_relu = schema.OperatorSetConcat(operators_set=[relu, relu6, leaky_relu, hard_tanh])
+    any_relu = schema.OperatorSetGroup(operators_set=[relu, relu6, leaky_relu, hard_tanh])
 
     # Combine multiple operators into a single operator to avoid quantization between
     # them. To do this, we define fusing patterns using the OperatorsSets that were created.
-    # To group multiple sets with regard to fusing, an OperatorSetConcat can be created
-    activations_after_conv_to_fuse = schema.OperatorSetConcat(operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, hard_swish, prelu, sigmoid, tanh])
-    conv_types = schema.OperatorSetConcat(operators_set=[conv, conv_transpose, depthwise_conv])
-    activations_after_fc_to_fuse = schema.OperatorSetConcat(operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, hard_swish, sigmoid])
-    any_binary = schema.OperatorSetConcat(operators_set=[add, sub, mul, div])
+    # To group multiple sets with regard to fusing, an OperatorSetGroup can be created
+    activations_after_conv_to_fuse = schema.OperatorSetGroup(operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, hard_swish, prelu, sigmoid, tanh])
+    conv_types = schema.OperatorSetGroup(operators_set=[conv, conv_transpose, depthwise_conv])
+    activations_after_fc_to_fuse = schema.OperatorSetGroup(operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, hard_swish, sigmoid])
+    any_binary = schema.OperatorSetGroup(operators_set=[add, sub, mul, div])
 
     # ------------------- #
     # Fusions
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tp_model.py b/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tp_model.py
index c238f6002..e5bfdb8f6 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tp_model.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tp_model.py
@@ -282,17 +282,17 @@ def generate_tp_model(default_config: OpQuantizationConfig,
         [conv, conv_transpose, depthwise_conv, fc, relu, relu6, leaky_relu, add, sub, mul, div, prelu, swish,
          hard_swish, sigmoid,
          tanh, hard_tanh])
-    any_relu = schema.OperatorSetConcat(operators_set=[relu, relu6, leaky_relu, hard_tanh])
+    any_relu = schema.OperatorSetGroup(operators_set=[relu, relu6, leaky_relu, hard_tanh])
 
     # Combine multiple operators into a single operator to avoid quantization between
     # them. To do this, we define fusing patterns using the OperatorsSets that were created.
-    # To group multiple sets with regard to fusing, an OperatorSetConcat can be created
-    activations_after_conv_to_fuse = schema.OperatorSetConcat(
+    # To group multiple sets with regard to fusing, an OperatorSetGroup can be created
+    activations_after_conv_to_fuse = schema.OperatorSetGroup(
         operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, hard_swish, prelu, sigmoid, tanh])
-    conv_types = schema.OperatorSetConcat(operators_set=[conv, conv_transpose, depthwise_conv])
-    activations_after_fc_to_fuse = schema.OperatorSetConcat(
+    conv_types = schema.OperatorSetGroup(operators_set=[conv, conv_transpose, depthwise_conv])
+    activations_after_fc_to_fuse = schema.OperatorSetGroup(
         operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, hard_swish, sigmoid])
-    any_binary = schema.OperatorSetConcat(operators_set=[add, sub, mul, div])
+    any_binary = schema.OperatorSetGroup(operators_set=[add, sub, mul, div])
 
     # ------------------- #
     # Fusions
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v4/tp_model.py b/tests/common_tests/helpers/tpcs_for_tests/v4/tp_model.py
index ec5dc46f3..0a32a5c1e 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v4/tp_model.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v4/tp_model.py
@@ -291,17 +291,17 @@ def generate_tp_model(default_config: OpQuantizationConfig,
     operator_set.extend(
         [conv, conv_transpose, depthwise_conv, fc, relu, relu6, leaky_relu, add, sub, mul, div, prelu, swish, hardswish, sigmoid,
          tanh, gelu, hardsigmoid, hard_tanh])
-    any_relu = schema.OperatorSetConcat(operators_set=[relu, relu6, leaky_relu, hard_tanh])
+    any_relu = schema.OperatorSetGroup(operators_set=[relu, relu6, leaky_relu, hard_tanh])
 
     # Combine multiple operators into a single operator to avoid quantization between
     # them. To do this we define fusing patterns using the OperatorsSets that were created.
-    # To group multiple sets with regard to fusing, an OperatorSetConcat can be created
-    activations_after_conv_to_fuse = schema.OperatorSetConcat(
+    # To group multiple sets with regard to fusing, an OperatorSetGroup can be created
+    activations_after_conv_to_fuse = schema.OperatorSetGroup(
         operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, gelu, hardswish, hardsigmoid, prelu, sigmoid, tanh])
-    conv_types = schema.OperatorSetConcat(operators_set=[conv, conv_transpose, depthwise_conv])
-    activations_after_fc_to_fuse = schema.OperatorSetConcat(operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, sigmoid, tanh, gelu,
-                                                             hardswish, hardsigmoid])
-    any_binary = schema.OperatorSetConcat(operators_set=[add, sub, mul, div])
+    conv_types = schema.OperatorSetGroup(operators_set=[conv, conv_transpose, depthwise_conv])
+    activations_after_fc_to_fuse = schema.OperatorSetGroup(operators_set=[relu, relu6, leaky_relu, hard_tanh, swish, sigmoid, tanh, gelu,
+                                                                          hardswish, hardsigmoid])
+    any_binary = schema.OperatorSetGroup(operators_set=[add, sub, mul, div])
 
     # ------------------- #
     # Fusions
diff --git a/tests/common_tests/test_tp_model.py b/tests/common_tests/test_tp_model.py
index 811c5255a..fb37a693c 100644
--- a/tests/common_tests/test_tp_model.py
+++ b/tests/common_tests/test_tp_model.py
@@ -44,7 +44,7 @@ def setUp(self):
         op1 = schema.OperatorsSet(name="opset1")
         op2 = schema.OperatorsSet(name="opset2")
         op3 = schema.OperatorsSet(name="opset3")
-        op12 = schema.OperatorSetConcat(operators_set=[op1, op2])
+        op12 = schema.OperatorSetGroup(operators_set=[op1, op2])
         self.tp_model = schema.TargetPlatformModel(default_qco=TEST_QCO,
                                                    operator_set=(op1, op2, op3),
                                                    fusing_patterns=(schema.Fusing(operator_groups=(op12, op3)),
@@ -327,7 +327,7 @@ def test_fusing_contains_with_opset_concat(self):
         tanh = schema.OperatorsSet(name="tanh")
         operator_set.extend([conv, add, tanh])
 
-        add_tanh = schema.OperatorSetConcat(operators_set=[add, tanh])
+        add_tanh = schema.OperatorSetGroup(operators_set=[add, tanh])
         fusing_patterns.append(schema.Fusing(operator_groups=(conv, add)))
         fusing_patterns.append(schema.Fusing(operator_groups=(conv, add_tanh)))
         fusing_patterns.append(schema.Fusing(operator_groups=(conv, add, tanh)))
diff --git a/tests/keras_tests/function_tests/test_layer_fusing.py b/tests/keras_tests/function_tests/test_layer_fusing.py
index 730f78115..9068c805b 100644
--- a/tests/keras_tests/function_tests/test_layer_fusing.py
+++ b/tests/keras_tests/function_tests/test_layer_fusing.py
@@ -119,7 +119,7 @@ def get_tpc_2():
     sigmoid = schema.OperatorsSet(name="Sigmoid")
     tanh = schema.OperatorsSet(name="Tanh")
     operator_set = [conv, any_relu, swish, sigmoid, tanh]
-    activations_after_conv_to_fuse = schema.OperatorSetConcat(operators_set=[any_relu, swish, sigmoid, tanh])
+    activations_after_conv_to_fuse = schema.OperatorSetGroup(operators_set=[any_relu, swish, sigmoid, tanh])
     # Define fusions
     fusing_patterns = [schema.Fusing(operator_groups=(conv, activations_after_conv_to_fuse))]
 
@@ -151,7 +151,7 @@ def get_tpc_4():
     any_relu = schema.OperatorsSet(name="AnyReLU")
     add = schema.OperatorsSet(name="Add")
     swish = schema.OperatorsSet(name="Swish")
-    activations_to_fuse = schema.OperatorSetConcat(operators_set=[any_relu, swish])
+    activations_to_fuse = schema.OperatorSetGroup(operators_set=[any_relu, swish])
     operator_set = [conv, fc, any_relu, add, swish]
     # Define fusions
     fusing_patterns = [schema.Fusing(operator_groups=(conv, activations_to_fuse)),
diff --git a/tests/keras_tests/non_parallel_tests/test_keras_tp_model.py b/tests/keras_tests/non_parallel_tests/test_keras_tp_model.py
index e4610953a..8037e3380 100644
--- a/tests/keras_tests/non_parallel_tests/test_keras_tp_model.py
+++ b/tests/keras_tests/non_parallel_tests/test_keras_tp_model.py
@@ -124,7 +124,7 @@ def test_get_layers_by_op(self):
     def test_get_layers_by_opconcat(self):
         op_obj_a = schema.OperatorsSet(name='opsetA')
         op_obj_b = schema.OperatorsSet(name='opsetB')
-        op_concat = schema.OperatorSetConcat(operators_set=[op_obj_a, op_obj_b])
+        op_concat = schema.OperatorSetGroup(operators_set=[op_obj_a, op_obj_b])
         hm = schema.TargetPlatformModel(
             default_qco=schema.QuantizationConfigOptions(quantization_configurations=tuple([TEST_QC])),
             tpc_minor_version=None,
diff --git a/tests/pytorch_tests/function_tests/layer_fusing_test.py b/tests/pytorch_tests/function_tests/layer_fusing_test.py
index 0ab6db09e..12c31892f 100644
--- a/tests/pytorch_tests/function_tests/layer_fusing_test.py
+++ b/tests/pytorch_tests/function_tests/layer_fusing_test.py
@@ -248,7 +248,7 @@ def get_tpc(self):
         add = schema.OperatorsSet(name=schema.OperatorSetNames.ADD)
         swish = schema.OperatorsSet(name=schema.OperatorSetNames.SWISH)
         operator_set = [conv, fc, relu, add, swish]
-        activations_to_fuse = schema.OperatorSetConcat(operators_set=[relu, swish])
+        activations_to_fuse = schema.OperatorSetGroup(operators_set=[relu, swish])
         # Define fusions
         fusing_patterns = [schema.Fusing(operator_groups=(conv, activations_to_fuse)),
                            schema.Fusing(operator_groups=(conv, add, activations_to_fuse)),
diff --git a/tests/pytorch_tests/function_tests/test_pytorch_tp_model.py b/tests/pytorch_tests/function_tests/test_pytorch_tp_model.py
index 95a3cc090..584509704 100644
--- a/tests/pytorch_tests/function_tests/test_pytorch_tp_model.py
+++ b/tests/pytorch_tests/function_tests/test_pytorch_tp_model.py
@@ -165,7 +165,7 @@ def test_get_layers_by_op(self):
     def test_get_layers_by_opconcat(self):
         op_obj_a = schema.OperatorsSet(name='opsetA')
         op_obj_b = schema.OperatorsSet(name='opsetB')
-        op_concat = schema.OperatorSetConcat(operators_set=[op_obj_a, op_obj_b])
+        op_concat = schema.OperatorSetGroup(operators_set=[op_obj_a, op_obj_b])
 
         hm = schema.TargetPlatformModel(
             default_qco=schema.QuantizationConfigOptions(quantization_configurations=tuple([TEST_QC])),

From 2cb52cd751ac0822c63c944ce5fe4825abe61eff Mon Sep 17 00:00:00 2001
From: liord <lior.dikstein@altair-semi.com>
Date: Tue, 7 Jan 2025 14:31:25 +0200
Subject: [PATCH 04/18] Rename TargetPlatformModel-->TargetPlatformCapabilities

---
 .../api/api_docs/modules/target_platform.rst  |   4 +-
 .../core/common/graph/base_node.py            |   2 +-
 .../keras/resource_utilization_data_facade.py |   4 +-
 .../resource_utilization_data_facade.py       |   4 +-
 .../gptq/keras/quantization_facade.py         |   4 +-
 .../gptq/pytorch/quantization_facade.py       |   4 +-
 .../pruning/keras/pruning_facade.py           |   4 +-
 .../pruning/pytorch/pruning_facade.py         |   4 +-
 .../ptq/keras/quantization_facade.py          |   4 +-
 .../ptq/pytorch/quantization_facade.py        |   4 +-
 .../qat/keras/quantization_facade.py          |   4 +-
 .../qat/pytorch/quantization_facade.py        |   4 +-
 .../schema/mct_current_schema.py              |   2 +-
 .../schema/schema_functions.py                |  22 ++--
 .../target_platform_capabilities/schema/v1.py |  12 +--
 .../target_platform/__init__.py               |   2 +-
 .../targetplatform2framework/attach2fw.py     |  10 +-
 .../framework_quantization_capabilities.py    |  10 +-
 .../operations_to_layers.py                   |   4 +-
 .../tpc_io_handler.py                         |  34 +++---
 .../get_target_platform_capabilities.py       |  18 ++--
 .../tpc_models/imx500_tpc/v1/tp_model.py      |  22 ++--
 .../tpc_models/qnnpack_tpc/v1/tp_model.py     |  22 ++--
 .../tpc_models/tflite_tpc/v1/tp_model.py      |  22 ++--
 .../helpers/generate_test_tp_model.py         |   6 +-
 .../helpers/tpcs_for_tests/v1/tp_model.py     |  22 ++--
 .../helpers/tpcs_for_tests/v1_lut/tp_model.py |  22 ++--
 .../helpers/tpcs_for_tests/v1_pot/tp_model.py |  22 ++--
 .../helpers/tpcs_for_tests/v2/tp_model.py     |  22 ++--
 .../helpers/tpcs_for_tests/v2_lut/tp_model.py |  22 ++--
 .../helpers/tpcs_for_tests/v3/tp_model.py     |  22 ++--
 .../helpers/tpcs_for_tests/v3_lut/tp_model.py |  22 ++--
 .../helpers/tpcs_for_tests/v4/tp_model.py     |  22 ++--
 tests/common_tests/test_tp_model.py           | 100 +++++++++---------
 .../tflite_int8/imx500_int8_tp_model.py       |   8 +-
 .../bn_attributes_quantization_test.py        |   2 +-
 .../feature_networks/mixed_precision_tests.py |   2 +-
 .../weights_mixed_precision_tests.py          |   4 +-
 .../function_tests/test_custom_layer.py       |  12 +--
 .../function_tests/test_hmse_error_method.py  |  12 +--
 .../function_tests/test_layer_fusing.py       |   2 +-
 .../non_parallel_tests/test_keras_tp_model.py |  50 ++++-----
 .../function_tests/layer_fusing_test.py       |  56 +++++-----
 .../function_tests/test_pytorch_tp_model.py   |  38 +++----
 .../model_tests/base_pytorch_test.py          |   4 +-
 .../bn_attributes_quantization_test.py        |   2 +-
 .../feature_models/const_quantization_test.py |   2 +-
 .../dynamic_size_inputs_test.py               |   4 +-
 .../mixed_precision_activation_test.py        |   4 +-
 .../mixed_precision_weights_test.py           |   6 +-
 .../multi_head_attention_test.py              |   4 +-
 .../keras/example_keras_pruning_mnist.ipynb   |   2 +-
 .../keras/example_keras_qat.ipynb             |   4 +-
 53 files changed, 363 insertions(+), 363 deletions(-)

diff --git a/docsrc/source/api/api_docs/modules/target_platform.rst b/docsrc/source/api/api_docs/modules/target_platform.rst
index c60611dfa..4fe74dfb4 100644
--- a/docsrc/source/api/api_docs/modules/target_platform.rst
+++ b/docsrc/source/api/api_docs/modules/target_platform.rst
@@ -56,9 +56,9 @@ QuantizationConfigOptions
 .. autoclass:: model_compression_toolkit.target_platform.QuantizationConfigOptions
 
 
-TargetPlatformModel
+TargetPlatformCapabilities
 =======================
-.. autoclass:: model_compression_toolkit.target_platform.TargetPlatformModel
+.. autoclass:: model_compression_toolkit.target_platform.TargetPlatformCapabilities
 
 
 OperatorsSet
diff --git a/model_compression_toolkit/core/common/graph/base_node.py b/model_compression_toolkit/core/common/graph/base_node.py
index 3310d1d52..916f2f68e 100644
--- a/model_compression_toolkit/core/common/graph/base_node.py
+++ b/model_compression_toolkit/core/common/graph/base_node.py
@@ -539,7 +539,7 @@ def get_all_weights_attr_candidates(self, attr: str) -> List[WeightsAttrQuantiza
     def get_qco(self, tpc: FrameworkQuantizationCapabilities) -> QuantizationConfigOptions:
         """
         Get the QuantizationConfigOptions of the node according
-        to the mappings from layers/LayerFilterParams to the OperatorsSet in the TargetPlatformModel.
+        to the mappings from layers/LayerFilterParams to the OperatorsSet in the TargetPlatformCapabilities.
 
         Args:
             tpc: TPC to extract the QuantizationConfigOptions for the node.
diff --git a/model_compression_toolkit/core/keras/resource_utilization_data_facade.py b/model_compression_toolkit/core/keras/resource_utilization_data_facade.py
index 4ad8a3409..493007d44 100644
--- a/model_compression_toolkit/core/keras/resource_utilization_data_facade.py
+++ b/model_compression_toolkit/core/keras/resource_utilization_data_facade.py
@@ -18,7 +18,7 @@
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.constants import TENSORFLOW
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
 from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization_data import compute_resource_utilization_data
 from model_compression_toolkit.verify_packages import FOUND_TF
@@ -39,7 +39,7 @@ def keras_resource_utilization_data(in_model: Model,
                                         representative_data_gen: Callable,
                                         core_config: CoreConfig = CoreConfig(
                                             mixed_precision_config=MixedPrecisionQuantizationConfig()),
-                                        target_platform_capabilities: TargetPlatformModel = KERAS_DEFAULT_TPC
+                                        target_platform_capabilities: TargetPlatformCapabilities = KERAS_DEFAULT_TPC
                                         ) -> ResourceUtilization:
         """
         Computes resource utilization data that can be used to calculate the desired target resource utilization
diff --git a/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py b/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py
index af4bb7e9c..ef5dc0a13 100644
--- a/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py
+++ b/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py
@@ -17,7 +17,7 @@
 
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.constants import PYTORCH
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
 from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization_data import compute_resource_utilization_data
@@ -41,7 +41,7 @@
     def pytorch_resource_utilization_data(in_model: Module,
                                           representative_data_gen: Callable,
                                           core_config: CoreConfig = CoreConfig(),
-                                          target_platform_capabilities: TargetPlatformModel= PYTORCH_DEFAULT_TPC
+                                          target_platform_capabilities: TargetPlatformCapabilities= PYTORCH_DEFAULT_TPC
                                           ) -> ResourceUtilization:
         """
         Computes resource utilization data that can be used to calculate the desired target resource utilization for mixed-precision quantization.
diff --git a/model_compression_toolkit/gptq/keras/quantization_facade.py b/model_compression_toolkit/gptq/keras/quantization_facade.py
index 9de539c47..feb9238a7 100644
--- a/model_compression_toolkit/gptq/keras/quantization_facade.py
+++ b/model_compression_toolkit/gptq/keras/quantization_facade.py
@@ -22,7 +22,7 @@
     LR_BIAS_DEFAULT, GPTQ_MOMENTUM, REG_DEFAULT_SLA
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.constants import TENSORFLOW, ACT_HESSIAN_DEFAULT_BATCH_SIZE, GPTQ_HESSIAN_NUM_SAMPLES
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
 from model_compression_toolkit.verify_packages import FOUND_TF
 from model_compression_toolkit.core.common.user_info import UserInformation
 from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig, GPTQHessianScoresConfig, \
@@ -155,7 +155,7 @@ def keras_gradient_post_training_quantization(in_model: Model, representative_da
                                                   gptq_representative_data_gen: Callable = None,
                                                   target_resource_utilization: ResourceUtilization = None,
                                                   core_config: CoreConfig = CoreConfig(),
-                                                  target_platform_capabilities: TargetPlatformModel = DEFAULT_KERAS_TPC) -> Tuple[Model, UserInformation]:
+                                                  target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_KERAS_TPC) -> Tuple[Model, UserInformation]:
         """
         Quantize a trained Keras model using post-training quantization. The model is quantized using a
         symmetric constraint quantization thresholds (power of two).
diff --git a/model_compression_toolkit/gptq/pytorch/quantization_facade.py b/model_compression_toolkit/gptq/pytorch/quantization_facade.py
index e886b48c3..60c1bb59a 100644
--- a/model_compression_toolkit/gptq/pytorch/quantization_facade.py
+++ b/model_compression_toolkit/gptq/pytorch/quantization_facade.py
@@ -31,7 +31,7 @@
 from model_compression_toolkit.gptq.runner import gptq_runner
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.metadata import create_model_metadata
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
 from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.verify_packages import FOUND_TORCH
 
@@ -144,7 +144,7 @@ def pytorch_gradient_post_training_quantization(model: Module,
                                                     core_config: CoreConfig = CoreConfig(),
                                                     gptq_config: GradientPTQConfig = None,
                                                     gptq_representative_data_gen: Callable = None,
-                                                    target_platform_capabilities: TargetPlatformModel = DEFAULT_PYTORCH_TPC):
+                                                    target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_PYTORCH_TPC):
         """
         Quantize a trained Pytorch module using post-training quantization.
         By default, the module is quantized using a symmetric constraint quantization thresholds
diff --git a/model_compression_toolkit/pruning/keras/pruning_facade.py b/model_compression_toolkit/pruning/keras/pruning_facade.py
index 7ea806755..beda9c3f2 100644
--- a/model_compression_toolkit/pruning/keras/pruning_facade.py
+++ b/model_compression_toolkit/pruning/keras/pruning_facade.py
@@ -17,7 +17,7 @@
 
 from model_compression_toolkit import get_target_platform_capabilities
 from model_compression_toolkit.constants import TENSORFLOW
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
 from model_compression_toolkit.verify_packages import FOUND_TF
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization
 from model_compression_toolkit.core.common.pruning.pruner import Pruner
@@ -44,7 +44,7 @@ def keras_pruning_experimental(model: Model,
                                    target_resource_utilization: ResourceUtilization,
                                    representative_data_gen: Callable,
                                    pruning_config: PruningConfig = PruningConfig(),
-                                   target_platform_capabilities: TargetPlatformModel = DEFAULT_KERAS_TPC) -> Tuple[Model, PruningInfo]:
+                                   target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_KERAS_TPC) -> Tuple[Model, PruningInfo]:
         """
         Perform structured pruning on a Keras model to meet a specified target resource utilization.
         This function prunes the provided model according to the target resource utilization by grouping and pruning
diff --git a/model_compression_toolkit/pruning/pytorch/pruning_facade.py b/model_compression_toolkit/pruning/pytorch/pruning_facade.py
index be59db610..6c5433c30 100644
--- a/model_compression_toolkit/pruning/pytorch/pruning_facade.py
+++ b/model_compression_toolkit/pruning/pytorch/pruning_facade.py
@@ -16,7 +16,7 @@
 from typing import Callable, Tuple
 from model_compression_toolkit import get_target_platform_capabilities
 from model_compression_toolkit.constants import PYTORCH
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
 from model_compression_toolkit.verify_packages import FOUND_TORCH
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization
 from model_compression_toolkit.core.common.pruning.pruner import Pruner
@@ -48,7 +48,7 @@ def pytorch_pruning_experimental(model: Module,
                                      target_resource_utilization: ResourceUtilization,
                                      representative_data_gen: Callable,
                                      pruning_config: PruningConfig = PruningConfig(),
-                                     target_platform_capabilities: TargetPlatformModel = DEFAULT_PYOTRCH_TPC) -> \
+                                     target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_PYOTRCH_TPC) -> \
             Tuple[Module, PruningInfo]:
         """
         Perform structured pruning on a Pytorch model to meet a specified target resource utilization.
diff --git a/model_compression_toolkit/ptq/keras/quantization_facade.py b/model_compression_toolkit/ptq/keras/quantization_facade.py
index d7d4ae682..86dcd14ac 100644
--- a/model_compression_toolkit/ptq/keras/quantization_facade.py
+++ b/model_compression_toolkit/ptq/keras/quantization_facade.py
@@ -22,7 +22,7 @@
 from model_compression_toolkit.core.common.visualization.tensorboard_writer import init_tensorboard_writer
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.constants import TENSORFLOW
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
 from model_compression_toolkit.verify_packages import FOUND_TF
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization
 from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \
@@ -52,7 +52,7 @@ def keras_post_training_quantization(in_model: Model,
                                          representative_data_gen: Callable,
                                          target_resource_utilization: ResourceUtilization = None,
                                          core_config: CoreConfig = CoreConfig(),
-                                         target_platform_capabilities: TargetPlatformModel = DEFAULT_KERAS_TPC):
+                                         target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_KERAS_TPC):
         """
          Quantize a trained Keras model using post-training quantization. The model is quantized using a
          symmetric constraint quantization thresholds (power of two).
diff --git a/model_compression_toolkit/ptq/pytorch/quantization_facade.py b/model_compression_toolkit/ptq/pytorch/quantization_facade.py
index c3835d1e6..4c9df057f 100644
--- a/model_compression_toolkit/ptq/pytorch/quantization_facade.py
+++ b/model_compression_toolkit/ptq/pytorch/quantization_facade.py
@@ -19,7 +19,7 @@
 from model_compression_toolkit.core.common.visualization.tensorboard_writer import init_tensorboard_writer
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.constants import PYTORCH
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
 from model_compression_toolkit.verify_packages import FOUND_TORCH
 from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization
@@ -49,7 +49,7 @@ def pytorch_post_training_quantization(in_module: Module,
                                            representative_data_gen: Callable,
                                            target_resource_utilization: ResourceUtilization = None,
                                            core_config: CoreConfig = CoreConfig(),
-                                           target_platform_capabilities: TargetPlatformModel = DEFAULT_PYTORCH_TPC):
+                                           target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_PYTORCH_TPC):
         """
         Quantize a trained Pytorch module using post-training quantization.
         By default, the module is quantized using a symmetric constraint quantization thresholds
diff --git a/model_compression_toolkit/qat/keras/quantization_facade.py b/model_compression_toolkit/qat/keras/quantization_facade.py
index 92f52b0b7..c953dfa55 100644
--- a/model_compression_toolkit/qat/keras/quantization_facade.py
+++ b/model_compression_toolkit/qat/keras/quantization_facade.py
@@ -19,7 +19,7 @@
 from model_compression_toolkit.core import CoreConfig
 from model_compression_toolkit.core.common.visualization.tensorboard_writer import init_tensorboard_writer
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
 from model_compression_toolkit.verify_packages import FOUND_TF
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization
 from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \
@@ -93,7 +93,7 @@ def keras_quantization_aware_training_init_experimental(in_model: Model,
                                                             target_resource_utilization: ResourceUtilization = None,
                                                             core_config: CoreConfig = CoreConfig(),
                                                             qat_config: QATConfig = QATConfig(),
-                                                            target_platform_capabilities: TargetPlatformModel = DEFAULT_KERAS_TPC):
+                                                            target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_KERAS_TPC):
         """
          Prepare a trained Keras model for quantization aware training. First the model quantization is optimized
          with post-training quantization, then the model layers are wrapped with QuantizeWrappers. The model is
diff --git a/model_compression_toolkit/qat/pytorch/quantization_facade.py b/model_compression_toolkit/qat/pytorch/quantization_facade.py
index 30a2d0344..e66cb9dd9 100644
--- a/model_compression_toolkit/qat/pytorch/quantization_facade.py
+++ b/model_compression_toolkit/qat/pytorch/quantization_facade.py
@@ -17,7 +17,7 @@
 from functools import partial
 
 from model_compression_toolkit.constants import PYTORCH
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2pytorch import \
     AttachTpcToPytorch
 from model_compression_toolkit.verify_packages import FOUND_TORCH
@@ -82,7 +82,7 @@ def pytorch_quantization_aware_training_init_experimental(in_model: Module,
                                                               target_resource_utilization: ResourceUtilization = None,
                                                               core_config: CoreConfig = CoreConfig(),
                                                               qat_config: QATConfig = QATConfig(),
-                                                              target_platform_capabilities: TargetPlatformModel = DEFAULT_PYTORCH_TPC):
+                                                              target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_PYTORCH_TPC):
         """
          Prepare a trained Pytorch model for quantization aware training. First the model quantization is optimized
          with post-training quantization, then the model layers are wrapped with QuantizeWrappers. The model is
diff --git a/model_compression_toolkit/target_platform_capabilities/schema/mct_current_schema.py b/model_compression_toolkit/target_platform_capabilities/schema/mct_current_schema.py
index 4bc78dc91..a7fc5776b 100644
--- a/model_compression_toolkit/target_platform_capabilities/schema/mct_current_schema.py
+++ b/model_compression_toolkit/target_platform_capabilities/schema/mct_current_schema.py
@@ -9,4 +9,4 @@
 OperatorsSet = schema.OperatorsSet
 OperatorSetGroup = schema.OperatorSetGroup
 Fusing = schema.Fusing
-TargetPlatformModel = schema.TargetPlatformModel
+TargetPlatformCapabilities = schema.TargetPlatformCapabilities
diff --git a/model_compression_toolkit/target_platform_capabilities/schema/schema_functions.py b/model_compression_toolkit/target_platform_capabilities/schema/schema_functions.py
index 36b2001a9..c512f9a47 100644
--- a/model_compression_toolkit/target_platform_capabilities/schema/schema_functions.py
+++ b/model_compression_toolkit/target_platform_capabilities/schema/schema_functions.py
@@ -16,7 +16,7 @@
 from typing import Optional
 
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import OpQuantizationConfig, \
-    TargetPlatformModel, QuantizationConfigOptions, OperatorsSetBase
+    TargetPlatformCapabilities, QuantizationConfigOptions, OperatorsSetBase
 
 
 def max_input_activation_n_bits(op_quantization_config: OpQuantizationConfig) -> int:
@@ -32,13 +32,13 @@ def max_input_activation_n_bits(op_quantization_config: OpQuantizationConfig) ->
     return max(op_quantization_config.supported_input_activation_n_bits)
 
 
-def get_config_options_by_operators_set(tp_model: TargetPlatformModel,
+def get_config_options_by_operators_set(tp_model: TargetPlatformCapabilities,
                                         operators_set_name: str) -> QuantizationConfigOptions:
     """
     Get the QuantizationConfigOptions of an OperatorsSet by its name.
 
     Args:
-        tp_model (TargetPlatformModel): The target platform model containing the operator sets and their configurations.
+        tp_model (TargetPlatformCapabilities): The target platform model containing the operator sets and their configurations.
         operators_set_name (str): The name of the OperatorsSet whose quantization configuration options are to be retrieved.
 
     Returns:
@@ -51,12 +51,12 @@ def get_config_options_by_operators_set(tp_model: TargetPlatformModel,
     return tp_model.default_qco
 
 
-def get_default_op_quantization_config(tp_model: TargetPlatformModel) -> OpQuantizationConfig:
+def get_default_op_quantization_config(tp_model: TargetPlatformCapabilities) -> OpQuantizationConfig:
     """
-    Get the default OpQuantizationConfig of the TargetPlatformModel.
+    Get the default OpQuantizationConfig of the TargetPlatformCapabilities.
 
     Args:
-        tp_model (TargetPlatformModel): The target platform model containing the default quantization configuration.
+        tp_model (TargetPlatformCapabilities): The target platform model containing the default quantization configuration.
 
     Returns:
         OpQuantizationConfig: The default quantization configuration.
@@ -70,12 +70,12 @@ def get_default_op_quantization_config(tp_model: TargetPlatformModel) -> OpQuant
     return tp_model.default_qco.quantization_configurations[0]
 
 
-def is_opset_in_model(tp_model: TargetPlatformModel, opset_name: str) -> bool:
+def is_opset_in_model(tp_model: TargetPlatformCapabilities, opset_name: str) -> bool:
     """
     Check whether an OperatorsSet is defined in the model.
 
     Args:
-        tp_model (TargetPlatformModel): The target platform model containing the list of operator sets.
+        tp_model (TargetPlatformCapabilities): The target platform model containing the list of operator sets.
         opset_name (str): The name of the OperatorsSet to check for existence.
 
     Returns:
@@ -84,12 +84,12 @@ def is_opset_in_model(tp_model: TargetPlatformModel, opset_name: str) -> bool:
     """
     return tp_model.operator_set is not None and opset_name in [x.name for x in tp_model.operator_set]
 
-def get_opset_by_name(tp_model: TargetPlatformModel, opset_name: str) -> Optional[OperatorsSetBase]:
+def get_opset_by_name(tp_model: TargetPlatformCapabilities, opset_name: str) -> Optional[OperatorsSetBase]:
     """
     Get an OperatorsSet object from the model by its name.
 
     Args:
-        tp_model (TargetPlatformModel): The target platform model containing the list of operator sets.
+        tp_model (TargetPlatformCapabilities): The target platform model containing the list of operator sets.
         opset_name (str): The name of the OperatorsSet to be retrieved.
 
     Returns:
@@ -101,5 +101,5 @@ def get_opset_by_name(tp_model: TargetPlatformModel, opset_name: str) -> Optiona
     """
     opset_list = [x for x in tp_model.operator_set if x.name == opset_name]
     if len(opset_list) > 1:
-        Logger.critical(f"Found more than one OperatorsSet in TargetPlatformModel with the name {opset_name}.") # pragma: no cover
+        Logger.critical(f"Found more than one OperatorsSet in TargetPlatformCapabilities with the name {opset_name}.") # pragma: no cover
     return opset_list[0] if opset_list else None
diff --git a/model_compression_toolkit/target_platform_capabilities/schema/v1.py b/model_compression_toolkit/target_platform_capabilities/schema/v1.py
index 5a3ae093b..c57f1fd7e 100644
--- a/model_compression_toolkit/target_platform_capabilities/schema/v1.py
+++ b/model_compression_toolkit/target_platform_capabilities/schema/v1.py
@@ -414,7 +414,7 @@ def get_info(self) -> Dict[str, Any]:
 
 class TargetPlatformModelComponent(BaseModel):
     """
-    Component of TargetPlatformModel (Fusing, OperatorsSet, etc.).
+    Component of TargetPlatformCapabilities (Fusing, OperatorsSet, etc.).
     """
     class Config:
         frozen = True
@@ -433,7 +433,7 @@ class OperatorsSet(OperatorsSetBase):
     Set of operators that are represented by a unique label.
 
     Attributes:
-        name (Union[str, OperatorSetNames]): The set's label (must be unique within a TargetPlatformModel).
+        name (Union[str, OperatorSetNames]): The set's label (must be unique within a TargetPlatformCapabilities).
         qc_options (Optional[QuantizationConfigOptions]): Configuration options to use for this set of operations.
             If None, it represents a fusing set.
         type (Literal["OperatorsSet"]): Fixed type identifier.
@@ -621,7 +621,7 @@ def get_info(self) -> Union[Dict[str, str], str]:
             for x in self.operator_groups
         ])
 
-class TargetPlatformModel(BaseModel):
+class TargetPlatformCapabilities(BaseModel):
     """
     Represents the hardware configuration used for quantized model inference.
 
@@ -682,10 +682,10 @@ def validate_after_initialization(cls, values: Dict[str, Any]) -> Dict[str, Any]
 
     def get_info(self) -> Dict[str, Any]:
         """
-        Get a dictionary summarizing the TargetPlatformModel properties.
+        Get a dictionary summarizing the TargetPlatformCapabilities properties.
 
         Returns:
-            Dict[str, Any]: Summary of the TargetPlatformModel properties.
+            Dict[str, Any]: Summary of the TargetPlatformCapabilities properties.
         """
         return {
             "Model name": self.name,
@@ -695,6 +695,6 @@ def get_info(self) -> Dict[str, Any]:
 
     def show(self):
         """
-        Display the TargetPlatformModel.
+        Display the TargetPlatformCapabilities.
         """
         pprint.pprint(self.get_info(), sort_dicts=False)
\ No newline at end of file
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py b/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py
index 006de8bca..d06da049d 100644
--- a/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py
+++ b/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py
@@ -15,7 +15,7 @@
 
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attribute_filter import AttributeFilter
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities, OperationsSetToLayers, Smaller, SmallerEq, NotEq, Eq, GreaterEq, Greater, LayerFilterParams, OperationsToLayers, get_current_tpc
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, OperatorsSet, \
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, OperatorsSet, \
     OperatorSetGroup, Signedness, AttributeQuantizationConfig, OpQuantizationConfig, QuantizationConfigOptions, Fusing
 
 from mct_quantizers import QuantizationMethod
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attach2fw.py b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attach2fw.py
index fb3f0af84..f3505c5ef 100644
--- a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attach2fw.py
+++ b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attach2fw.py
@@ -1,7 +1,7 @@
 from typing import Dict, Optional
 
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, \
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     OperatorsSet
 from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities, \
     OperationsSetToLayers
@@ -19,15 +19,15 @@ def __init__(self):
         # in the operation set are provided in the mapping,  a DefaultDict should be supplied to handle missing entries.
         self._opset2attr_mapping = None  # Mapping of operation sets to their corresponding framework-specific layers
 
-    def attach(self, tpc_model: TargetPlatformModel,
+    def attach(self, tpc_model: TargetPlatformCapabilities,
                custom_opset2layer: Optional[Dict[str, 'CustomOpsetLayers']] = None
                ) -> FrameworkQuantizationCapabilities:
         """
-        Attaching a TargetPlatformModel which includes a platform capabilities description to specific
+        Attaching a TargetPlatformCapabilities which includes a platform capabilities description to specific
         framework's operators.
 
         Args:
-            tpc_model: a TargetPlatformModel object.
+            tpc_model: a TargetPlatformCapabilities object.
             custom_opset2layer: optional set of custom operator sets which allows to add/override the built-in set
                 of framework operator, to define a specific behavior for those operators. This dictionary should map
                 an operator set unique name to a pair of: a list of framework operators and an optional
@@ -59,7 +59,7 @@ def attach(self, tpc_model: TargetPlatformModel,
                             attr_mapping = self._opset2attr_mapping.get(opset.name)
                             OperationsSetToLayers(opset.name, layers, attr_mapping=attr_mapping)
                     else:
-                        Logger.critical(f'{opset.name} is defined in TargetPlatformModel, '
+                        Logger.critical(f'{opset.name} is defined in TargetPlatformCapabilities, '
                                         f'but is not defined in the framework set of operators or in the provided '
                                         f'custom operator sets mapping.')
 
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/framework_quantization_capabilities.py b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/framework_quantization_capabilities.py
index 07a400836..8b1b4b9aa 100644
--- a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/framework_quantization_capabilities.py
+++ b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/framework_quantization_capabilities.py
@@ -26,7 +26,7 @@
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.framework_quantization_capabilities_component import FrameworkQuantizationCapabilitiesComponent
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.layer_filter_params import LayerFilterParams
 from model_compression_toolkit.target_platform_capabilities.immutable import ImmutableClass
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, OperatorsSetBase, \
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, OperatorsSetBase, \
     OpQuantizationConfig, QuantizationConfigOptions
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.current_tpc import _current_tpc
 
@@ -36,18 +36,18 @@ class FrameworkQuantizationCapabilities(ImmutableClass):
     Attach framework information to a modeled hardware.
     """
     def __init__(self,
-                 tp_model: TargetPlatformModel,
+                 tp_model: TargetPlatformCapabilities,
                  name: str = "base"):
         """
 
         Args:
-            tp_model (TargetPlatformModel): Modeled hardware to attach framework information to.
+            tp_model (TargetPlatformCapabilities): Modeled hardware to attach framework information to.
             name (str): Name of the FrameworkQuantizationCapabilities.
         """
 
         super().__init__()
         self.name = name
-        assert isinstance(tp_model, TargetPlatformModel), f'Target platform model that was passed to FrameworkQuantizationCapabilities must be of type TargetPlatformModel, but has type of {type(tp_model)}'
+        assert isinstance(tp_model, TargetPlatformCapabilities), f'Target platform model that was passed to FrameworkQuantizationCapabilities must be of type TargetPlatformCapabilities, but has type of {type(tp_model)}'
         self.tp_model = tp_model
         self.op_sets_to_layers = OperationsToLayers() # Init an empty OperationsToLayers
         self.layer2qco, self.filterlayer2qco = {}, {} # Init empty mappings from layers/LayerFilterParams to QC options
@@ -164,7 +164,7 @@ def __exit__(self, exc_type, exc_value, tb):
     def get_default_op_qc(self) -> OpQuantizationConfig:
         """
 
-        Returns: The default OpQuantizationConfig of the TargetPlatformModel that is attached
+        Returns: The default OpQuantizationConfig of the TargetPlatformCapabilities that is attached
         to the FrameworkQuantizationCapabilities.
 
         """
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py
index b80dcff0b..9c742306c 100644
--- a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py
+++ b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py
@@ -57,7 +57,7 @@ def __repr__(self) -> str:
 
 class OperationsToLayers:
     """
-    Gather multiple OperationsSetToLayers to represent mapping of framework's layers to TargetPlatformModel OperatorsSet.
+    Gather multiple OperationsSetToLayers to represent mapping of framework's layers to TargetPlatformCapabilities OperatorsSet.
     """
     def __init__(self,
                  op_sets_to_layers: List[OperationsSetToLayers]=None):
@@ -142,7 +142,7 @@ def validate_op_sets(self):
             assert ops2layers.name not in existing_opset_names, f'OperationsSetToLayers names should be unique, but {ops2layers.name} appears to violate it.'
             existing_opset_names.append(ops2layers.name)
 
-            # Assert that a layer does not appear in more than a single OperatorsSet in the TargetPlatformModel.
+            # Assert that a layer does not appear in more than a single OperatorsSet in the TargetPlatformCapabilities.
             for layer in ops2layers.layers:
                 qco_by_opset_name = get_config_options_by_operators_set(_current_tpc.get().tp_model, ops2layers.name)
                 if layer in existing_layers:
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_io_handler.py b/model_compression_toolkit/target_platform_capabilities/tpc_io_handler.py
index 3ffb649d8..9d842da1e 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_io_handler.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_io_handler.py
@@ -16,27 +16,27 @@
 from typing import Union
 
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
 import json
 
 
-def load_target_platform_model(tp_model_or_path: Union[TargetPlatformModel, str]) -> TargetPlatformModel:
+def load_target_platform_model(tp_model_or_path: Union[TargetPlatformCapabilities, str]) -> TargetPlatformCapabilities:
     """
-        Parses the tp_model input, which can be either a TargetPlatformModel object
+        Parses the tp_model input, which can be either a TargetPlatformCapabilities object
         or a string path to a JSON file.
 
         Parameters:
-            tp_model_or_path (Union[TargetPlatformModel, str]): Input target platform model or path to .JSON file.
+            tp_model_or_path (Union[TargetPlatformCapabilities, str]): Input target platform model or path to .JSON file.
 
         Returns:
-            TargetPlatformModel: The parsed TargetPlatformModel.
+            TargetPlatformCapabilities: The parsed TargetPlatformCapabilities.
 
         Raises:
             FileNotFoundError: If the JSON file does not exist.
-            ValueError: If the JSON content is invalid or cannot initialize the TargetPlatformModel.
-            TypeError: If the input is neither a TargetPlatformModel nor a valid JSON file path.
+            ValueError: If the JSON content is invalid or cannot initialize the TargetPlatformCapabilities.
+            TypeError: If the input is neither a TargetPlatformCapabilities nor a valid JSON file path.
         """
-    if isinstance(tp_model_or_path, TargetPlatformModel):
+    if isinstance(tp_model_or_path, TargetPlatformCapabilities):
         return tp_model_or_path
 
     if isinstance(tp_model_or_path, str):
@@ -54,32 +54,32 @@ def load_target_platform_model(tp_model_or_path: Union[TargetPlatformModel, str]
             raise ValueError(f"Error reading the file '{tp_model_or_path}': {e.strerror}.") from e
 
         try:
-            return TargetPlatformModel.parse_raw(data)
+            return TargetPlatformCapabilities.parse_raw(data)
         except ValueError as e:
-            raise ValueError(f"Invalid JSON for loading TargetPlatformModel in '{tp_model_or_path}': {e}.") from e
+            raise ValueError(f"Invalid JSON for loading TargetPlatformCapabilities in '{tp_model_or_path}': {e}.") from e
         except Exception as e:
-            raise ValueError(f"Unexpected error while initializing TargetPlatformModel: {e}.") from e
+            raise ValueError(f"Unexpected error while initializing TargetPlatformCapabilities: {e}.") from e
 
     raise TypeError(
-        f"tp_model_or_path must be either a TargetPlatformModel instance or a string path to a JSON file, "
+        f"tp_model_or_path must be either a TargetPlatformCapabilities instance or a string path to a JSON file, "
         f"but received type '{type(tp_model_or_path).__name__}'."
     )
 
 
-def export_target_platform_model(model: TargetPlatformModel, export_path: Union[str, Path]) -> None:
+def export_target_platform_model(model: TargetPlatformCapabilities, export_path: Union[str, Path]) -> None:
     """
-    Exports a TargetPlatformModel instance to a JSON file.
+    Exports a TargetPlatformCapabilities instance to a JSON file.
 
     Parameters:
-        model (TargetPlatformModel): The TargetPlatformModel instance to export.
+        model (TargetPlatformCapabilities): The TargetPlatformCapabilities instance to export.
         export_path (Union[str, Path]): The file path to export the model to.
 
     Raises:
-        ValueError: If the model is not an instance of TargetPlatformModel.
+        ValueError: If the model is not an instance of TargetPlatformCapabilities.
         OSError: If there is an issue writing to the file.
     """
-    if not isinstance(model, TargetPlatformModel):
-        raise ValueError("The provided model is not a valid TargetPlatformModel instance.")
+    if not isinstance(model, TargetPlatformCapabilities):
+        raise ValueError("The provided model is not a valid TargetPlatformCapabilities instance.")
 
     path = Path(export_path)
     try:
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py
index c83e8ed00..f71ab0fe2 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py
@@ -15,7 +15,7 @@
 from model_compression_toolkit.constants import TENSORFLOW, PYTORCH
 from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL, IMX500_TP_MODEL, \
     TFLITE_TP_MODEL, QNNPACK_TP_MODEL
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
 
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v1.tp_model import get_tp_model as get_tp_model_imx500_v1
 from model_compression_toolkit.target_platform_capabilities.tpc_models.tflite_tpc.v1.tp_model import get_tp_model as get_tp_model_tflite_v1
@@ -26,9 +26,9 @@
 
 def get_target_platform_capabilities(fw_name: str,
                                      target_platform_name: str,
-                                     target_platform_version: str = None) -> TargetPlatformModel:
+                                     target_platform_version: str = None) -> TargetPlatformCapabilities:
     """
-    This is a degenerated function that only returns the MCT default TargetPlatformModel object, to comply with the
+    This is a degenerated function that only returns the MCT default TargetPlatformCapabilities object, to comply with the
     existing TPC API.
 
     Args:
@@ -37,7 +37,7 @@ def get_target_platform_capabilities(fw_name: str,
         target_platform_version: Target platform capabilities version.
 
     Returns:
-        A default TargetPlatformModel object.
+        A default TargetPlatformCapabilities object.
     """
 
     assert fw_name in [TENSORFLOW, PYTORCH], f"Unsupported framework {fw_name}."
@@ -58,16 +58,16 @@ def get_target_platform_capabilities(fw_name: str,
     raise ValueError(f"Unsupported target platform name {target_platform_name}.")
 
 
-def get_tpc_model(name: str, tp_model: TargetPlatformModel):
+def get_tpc_model(name: str, tp_model: TargetPlatformCapabilities):
     """
-    This is a utility method that just returns the TargetPlatformModel that it receives, to support existing TPC API.
+    This is a utility method that just returns the TargetPlatformCapabilities that it receives, to support existing TPC API.
 
     Args:
-        name: the name of the TargetPlatformModel (not used in this function).
-        tp_model: a TargetPlatformModel to return.
+        name: the name of the TargetPlatformCapabilities (not used in this function).
+        tp_model: a TargetPlatformCapabilities to return.
 
     Returns:
-        The given TargetPlatformModel object.
+        The given TargetPlatformCapabilities object.
 
     """
 
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py
index 2bd8f6c1c..8aad40644 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py
@@ -19,13 +19,13 @@
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
     IMX500_TP_MODEL
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, Signedness, \
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
 tp = mct.target_platform
 
 
-def get_tp_model() -> TargetPlatformModel:
+def get_tp_model() -> TargetPlatformCapabilities:
     """
     A method that generates a default target platform model, with base 8-bit quantization configuration and 8, 4, 2
     bits configuration list for mixed-precision quantization.
@@ -33,7 +33,7 @@ def get_tp_model() -> TargetPlatformModel:
     (for tests, experiments, etc.), use this method implementation as a test-case, i.e., override the
     'get_op_quantization_configs' method and use its output to call 'generate_tp_model' with your configurations.
 
-    Returns: A TargetPlatformModel object.
+    Returns: A TargetPlatformCapabilities object.
 
     """
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
@@ -45,7 +45,7 @@ def get_tp_model() -> TargetPlatformModel:
 
 def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantizationConfig], OpQuantizationConfig]:
     """
-    Creates a default configuration object for 8-bit quantization, to be used to set a default TargetPlatformModel.
+    Creates a default configuration object for 8-bit quantization, to be used to set a default TargetPlatformCapabilities.
     In addition, creates a default configuration objects list (with 8, 4 and 2 bit quantization) to be used as
     default configuration for mixed-precision quantization.
 
@@ -134,19 +134,19 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 def generate_tp_model(default_config: OpQuantizationConfig,
                       base_config: OpQuantizationConfig,
                       mixed_precision_cfg_list: List[OpQuantizationConfig],
-                      name: str) -> TargetPlatformModel:
+                      name: str) -> TargetPlatformCapabilities:
     """
-    Generates TargetPlatformModel with default defined Operators Sets, based on the given base configuration and
+    Generates TargetPlatformCapabilities with default defined Operators Sets, based on the given base configuration and
     mixed-precision configurations options list.
 
     Args
         default_config: A default OpQuantizationConfig to set as the TP model default configuration.
-        base_config: An OpQuantizationConfig to set as the TargetPlatformModel base configuration for mixed-precision purposes only.
+        base_config: An OpQuantizationConfig to set as the TargetPlatformCapabilities base configuration for mixed-precision purposes only.
         mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TP model mixed-precision
             quantization configuration options.
-        name: The name of the TargetPlatformModel.
+        name: The name of the TargetPlatformCapabilities.
 
-    Returns: A TargetPlatformModel object.
+    Returns: A TargetPlatformCapabilities object.
 
     """
     # Create a QuantizationConfigOptions, which defines a set
@@ -234,10 +234,10 @@ def generate_tp_model(default_config: OpQuantizationConfig,
     fusing_patterns.append(schema.Fusing(operator_groups=(fc, activations_after_fc_to_fuse)))
     fusing_patterns.append(schema.Fusing(operator_groups=(any_binary, any_relu)))
 
-    # Create a TargetPlatformModel and set its default quantization config.
+    # Create a TargetPlatformCapabilities and set its default quantization config.
     # This default configuration will be used for all operations
     # unless specified otherwise (see OperatorsSet, for example):
-    generated_tpc = schema.TargetPlatformModel(
+    generated_tpc = schema.TargetPlatformCapabilities(
         default_qco=default_configuration_options,
         tpc_minor_version=1,
         tpc_patch_version=0,
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py
index e500d2977..f2d5572dd 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py
@@ -18,14 +18,14 @@
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, QNNPACK_TP_MODEL
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, \
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
 tp = mct.target_platform
 
 
-def get_tp_model() -> TargetPlatformModel:
+def get_tp_model() -> TargetPlatformCapabilities:
     """
     A method that generates a default target platform model, with base 8-bit quantization configuration and 8, 4, 2
     bits configuration list for mixed-precision quantization.
@@ -33,7 +33,7 @@ def get_tp_model() -> TargetPlatformModel:
     (for tests, experiments, etc.), use this method implementation as a test-case, i.e., override the
     'get_op_quantization_configs' method and use its output to call 'generate_tp_model' with your configurations.
 
-    Returns: A TargetPlatformModel object.
+    Returns: A TargetPlatformCapabilities object.
 
     """
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
@@ -45,7 +45,7 @@ def get_tp_model() -> TargetPlatformModel:
 
 def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantizationConfig], OpQuantizationConfig]:
     """
-    Creates a default configuration object for 8-bit quantization, to be used to set a default TargetPlatformModel.
+    Creates a default configuration object for 8-bit quantization, to be used to set a default TargetPlatformCapabilities.
     In addition, creates a default configuration objects list (with 8, 4 and 2 bit quantization) to be used as
     default configuration for mixed-precision quantization.
 
@@ -120,19 +120,19 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 def generate_tp_model(default_config: OpQuantizationConfig,
                       base_config: OpQuantizationConfig,
                       mixed_precision_cfg_list: List[OpQuantizationConfig],
-                      name: str) -> TargetPlatformModel:
+                      name: str) -> TargetPlatformCapabilities:
     """
-    Generates TargetPlatformModel with default defined Operators Sets, based on the given base configuration and
+    Generates TargetPlatformCapabilities with default defined Operators Sets, based on the given base configuration and
     mixed-precision configurations options list.
 
     Args
         default_config: A default OpQuantizationConfig to set as the TP model default configuration.
-        base_config: An OpQuantizationConfig to set as the TargetPlatformModel base configuration for mixed-precision purposes only.
+        base_config: An OpQuantizationConfig to set as the TargetPlatformCapabilities base configuration for mixed-precision purposes only.
         mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TP model mixed-precision
             quantization configuration options.
-        name: The name of the TargetPlatformModel.
+        name: The name of the TargetPlatformCapabilities.
 
-    Returns: A TargetPlatformModel object.
+    Returns: A TargetPlatformCapabilities object.
 
     """
     # Create a QuantizationConfigOptions, which defines a set
@@ -171,10 +171,10 @@ def generate_tp_model(default_config: OpQuantizationConfig,
     fusing_patterns.append(schema.Fusing(operator_groups=(conv_opset_concat, relu_opset_concat)))
     fusing_patterns.append(schema.Fusing(operator_groups=(linear, relu_opset_concat)))
 
-    # Create a TargetPlatformModel and set its default quantization config.
+    # Create a TargetPlatformCapabilities and set its default quantization config.
     # This default configuration will be used for all operations
     # unless specified otherwise (see OperatorsSet, for example):
-    generated_tpc = schema.TargetPlatformModel(
+    generated_tpc = schema.TargetPlatformCapabilities(
         default_qco=default_configuration_options,
         tpc_minor_version=1,
         tpc_patch_version=0,
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py
index 720781739..d3b47ce2d 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py
@@ -18,13 +18,13 @@
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import BIAS_ATTR, KERNEL_ATTR, TFLITE_TP_MODEL
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, Signedness, \
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
 tp = mct.target_platform
 
 
-def get_tp_model() -> TargetPlatformModel:
+def get_tp_model() -> TargetPlatformCapabilities:
     """
     A method that generates a default target platform model, with base 8-bit quantization configuration and 8, 4, 2
     bits configuration list for mixed-precision quantization.
@@ -32,7 +32,7 @@ def get_tp_model() -> TargetPlatformModel:
     (for tests, experiments, etc.), use this method implementation as a test-case, i.e., override the
     'get_op_quantization_configs' method and use its output to call 'generate_tp_model' with your configurations.
 
-    Returns: A TargetPlatformModel object.
+    Returns: A TargetPlatformCapabilities object.
 
     """
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
@@ -44,7 +44,7 @@ def get_tp_model() -> TargetPlatformModel:
 
 def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantizationConfig], OpQuantizationConfig]:
     """
-    Creates a default configuration object for 8-bit quantization, to be used to set a default TargetPlatformModel.
+    Creates a default configuration object for 8-bit quantization, to be used to set a default TargetPlatformCapabilities.
     In addition, creates a default configuration objects list (with 8, 4 and 2 bit quantization) to be used as
     default configuration for mixed-precision quantization.
 
@@ -117,19 +117,19 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 def generate_tp_model(default_config: OpQuantizationConfig,
                       base_config: OpQuantizationConfig,
                       mixed_precision_cfg_list: List[OpQuantizationConfig],
-                      name: str) -> TargetPlatformModel:
+                      name: str) -> TargetPlatformCapabilities:
     """
-    Generates TargetPlatformModel with default defined Operators Sets, based on the given base configuration and
+    Generates TargetPlatformCapabilities with default defined Operators Sets, based on the given base configuration and
     mixed-precision configurations options list.
 
     Args
         default_config: A default OpQuantizationConfig to set as the TP model default configuration.
-        base_config: An OpQuantizationConfig to set as the TargetPlatformModel base configuration for mixed-precision purposes only.
+        base_config: An OpQuantizationConfig to set as the TargetPlatformCapabilities base configuration for mixed-precision purposes only.
         mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TP model mixed-precision
             quantization configuration options.
-        name: The name of the TargetPlatformModel.
+        name: The name of the TargetPlatformCapabilities.
 
-    Returns: A TargetPlatformModel object.
+    Returns: A TargetPlatformCapabilities object.
 
     """
     # Create a QuantizationConfigOptions, which defines a set
@@ -209,10 +209,10 @@ def generate_tp_model(default_config: OpQuantizationConfig,
     fusing_patterns.append(schema.Fusing(operator_groups=(batch_norm, activations_to_fuse)))
     fusing_patterns.append(schema.Fusing(operator_groups=(batch_norm, add, activations_to_fuse)))
 
-    # Create a TargetPlatformModel and set its default quantization config.
+    # Create a TargetPlatformCapabilities and set its default quantization config.
     # This default configuration will be used for all operations
     # unless specified otherwise (see OperatorsSet, for example):
-    generated_tpc = schema.TargetPlatformModel(
+    generated_tpc = schema.TargetPlatformCapabilities(
         default_qco=default_configuration_options,
         tpc_minor_version=1,
         tpc_patch_version=0,
diff --git a/tests/common_tests/helpers/generate_test_tp_model.py b/tests/common_tests/helpers/generate_test_tp_model.py
index 0991ea323..8510f9a46 100644
--- a/tests/common_tests/helpers/generate_test_tp_model.py
+++ b/tests/common_tests/helpers/generate_test_tp_model.py
@@ -130,7 +130,7 @@ def generate_tp_model_with_activation_mp(base_cfg, default_config, mp_bitwidth_c
 
 def generate_custom_test_tp_model(name: str,
                                   base_cfg: OpQuantizationConfig,
-                                  base_tp_model: schema.TargetPlatformModel,
+                                  base_tp_model: schema.TargetPlatformCapabilities,
                                   operator_sets_dict: Dict[str, QuantizationConfigOptions] = None):
     default_configuration_options = schema.QuantizationConfigOptions(quantization_configurations=tuple([base_cfg]))
 
@@ -154,7 +154,7 @@ def generate_custom_test_tp_model(name: str,
     for fusion in base_tp_model.fusing_patterns:
         fusing_patterns.append(schema.Fusing(operator_groups=fusion.operator_groups))
 
-    custom_tp_model = schema.TargetPlatformModel(
+    custom_tp_model = schema.TargetPlatformCapabilities(
         default_qco=default_configuration_options,
         tpc_minor_version=None,
         tpc_patch_version=None,
@@ -167,7 +167,7 @@ def generate_custom_test_tp_model(name: str,
 
 
 def generate_test_tpc(name: str,
-                      tp_model: schema.TargetPlatformModel,
+                      tp_model: schema.TargetPlatformCapabilities,
                       base_tpc: tp.FrameworkQuantizationCapabilities,
                       op_sets_to_layer_add: Dict[str, List[Any]] = None,
                       op_sets_to_layer_drop: Dict[str, List[Any]] = None,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v1/tp_model.py b/tests/common_tests/helpers/tpcs_for_tests/v1/tp_model.py
index f3ee0b2c2..d026c4f0f 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v1/tp_model.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v1/tp_model.py
@@ -19,13 +19,13 @@
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
     IMX500_TP_MODEL
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, Signedness, \
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
 tp = mct.target_platform
 
 
-def get_tp_model() -> TargetPlatformModel:
+def get_tp_model() -> TargetPlatformCapabilities:
     """
     A method that generates a default target platform model, with base 8-bit quantization configuration and 8, 4, 2
     bits configuration list for mixed-precision quantization.
@@ -33,7 +33,7 @@ def get_tp_model() -> TargetPlatformModel:
     (for tests, experiments, etc.), use this method implementation as a test-case, i.e., override the
     'get_op_quantization_configs' method and use its output to call 'generate_tp_model' with your configurations.
 
-    Returns: A TargetPlatformModel object.
+    Returns: A TargetPlatformCapabilities object.
 
     """
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
@@ -45,7 +45,7 @@ def get_tp_model() -> TargetPlatformModel:
 
 def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantizationConfig], OpQuantizationConfig]:
     """
-    Creates a default configuration object for 8-bit quantization, to be used to set a default TargetPlatformModel.
+    Creates a default configuration object for 8-bit quantization, to be used to set a default TargetPlatformCapabilities.
     In addition, creates a default configuration objects list (with 8, 4 and 2 bit quantization) to be used as
     default configuration for mixed-precision quantization.
 
@@ -134,19 +134,19 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 def generate_tp_model(default_config: OpQuantizationConfig,
                       base_config: OpQuantizationConfig,
                       mixed_precision_cfg_list: List[OpQuantizationConfig],
-                      name: str) -> TargetPlatformModel:
+                      name: str) -> TargetPlatformCapabilities:
     """
-    Generates TargetPlatformModel with default defined Operators Sets, based on the given base configuration and
+    Generates TargetPlatformCapabilities with default defined Operators Sets, based on the given base configuration and
     mixed-precision configurations options list.
 
     Args
         default_config: A default OpQuantizationConfig to set as the TP model default configuration.
-        base_config: An OpQuantizationConfig to set as the TargetPlatformModel base configuration for mixed-precision purposes only.
+        base_config: An OpQuantizationConfig to set as the TargetPlatformCapabilities base configuration for mixed-precision purposes only.
         mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TP model mixed-precision
             quantization configuration options.
-        name: The name of the TargetPlatformModel.
+        name: The name of the TargetPlatformCapabilities.
 
-    Returns: A TargetPlatformModel object.
+    Returns: A TargetPlatformCapabilities object.
 
     """
     # Create a QuantizationConfigOptions, which defines a set
@@ -235,10 +235,10 @@ def generate_tp_model(default_config: OpQuantizationConfig,
     fusing_patterns.append(schema.Fusing(operator_groups=(fc, activations_after_fc_to_fuse)))
     fusing_patterns.append(schema.Fusing(operator_groups=(any_binary, any_relu)))
 
-    # Create a TargetPlatformModel and set its default quantization config.
+    # Create a TargetPlatformCapabilities and set its default quantization config.
     # This default configuration will be used for all operations
     # unless specified otherwise (see OperatorsSet, for example):
-    generated_tpc = schema.TargetPlatformModel(
+    generated_tpc = schema.TargetPlatformCapabilities(
         default_qco=default_configuration_options,
         tpc_minor_version=1,
         tpc_patch_version=0,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tp_model.py b/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tp_model.py
index 14d033d9d..76a7a9045 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tp_model.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tp_model.py
@@ -19,14 +19,14 @@
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
     WEIGHTS_QUANTIZATION_METHOD, IMX500_TP_MODEL
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, \
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
 tp = mct.target_platform
 
 
-def get_tp_model() -> TargetPlatformModel:
+def get_tp_model() -> TargetPlatformCapabilities:
     """
     A method that generates a default target platform model, with base 8-bit quantization configuration and 8, 4, 2
     bits configuration list for mixed-precision quantization.
@@ -34,7 +34,7 @@ def get_tp_model() -> TargetPlatformModel:
     (for tests, experiments, etc.), use this method implementation as a test-case, i.e., override the
     'get_op_quantization_configs' method and use its output to call 'generate_tp_model' with your configurations.
 
-    Returns: A TargetPlatformModel object.
+    Returns: A TargetPlatformCapabilities object.
 
     """
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
@@ -46,7 +46,7 @@ def get_tp_model() -> TargetPlatformModel:
 
 def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantizationConfig], OpQuantizationConfig]:
     """
-    Creates a default configuration object for 8-bit quantization, to be used to set a default TargetPlatformModel.
+    Creates a default configuration object for 8-bit quantization, to be used to set a default TargetPlatformCapabilities.
     In addition, creates a default configuration objects list (with 8, 4 and 2 bit quantization) to be used as
     default configuration for mixed-precision quantization with non-uniform quantizer for 2 and 4 bit candidates.
 
@@ -132,19 +132,19 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 def generate_tp_model(default_config: OpQuantizationConfig,
                       base_config: OpQuantizationConfig,
                       mixed_precision_cfg_list: List[OpQuantizationConfig],
-                      name: str) -> TargetPlatformModel:
+                      name: str) -> TargetPlatformCapabilities:
     """
-    Generates TargetPlatformModel with default defined Operators Sets, based on the given base configuration and
+    Generates TargetPlatformCapabilities with default defined Operators Sets, based on the given base configuration and
     mixed-precision configurations options list.
 
     Args
         default_config: A default OpQuantizationConfig to set as the TP model default configuration.
-        base_config: An OpQuantizationConfig to set as the TargetPlatformModel base configuration for mixed-precision purposes only.
+        base_config: An OpQuantizationConfig to set as the TargetPlatformCapabilities base configuration for mixed-precision purposes only.
         mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TP model mixed-precision
             quantization configuration options.
-        name: The name of the TargetPlatformModel.
+        name: The name of the TargetPlatformCapabilities.
 
-    Returns: A TargetPlatformModel object.
+    Returns: A TargetPlatformCapabilities object.
 
     """
     # Create a QuantizationConfigOptions, which defines a set
@@ -267,10 +267,10 @@ def generate_tp_model(default_config: OpQuantizationConfig,
     fusing_patterns.append(schema.Fusing(operator_groups=(fc, activations_after_fc_to_fuse)))
     fusing_patterns.append(schema.Fusing(operator_groups=(any_binary, any_relu)))
 
-    # Create a TargetPlatformModel and set its default quantization config.
+    # Create a TargetPlatformCapabilities and set its default quantization config.
     # This default configuration will be used for all operations
     # unless specified otherwise (see OperatorsSet, for example):
-    generated_tpm = schema.TargetPlatformModel(
+    generated_tpm = schema.TargetPlatformCapabilities(
         default_qco=default_configuration_options,
         tpc_minor_version=2,
         tpc_patch_version=0,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tp_model.py b/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tp_model.py
index 0dbd50dd3..a0e8bef02 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tp_model.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tp_model.py
@@ -19,14 +19,14 @@
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
     IMX500_TP_MODEL
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, \
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
 tp = mct.target_platform
 
 
-def get_tp_model() -> TargetPlatformModel:
+def get_tp_model() -> TargetPlatformCapabilities:
     """
     A method that generates a default target platform model, with base 8-bit quantization configuration and 8, 4, 2
     bits configuration list for mixed-precision quantization.
@@ -34,7 +34,7 @@ def get_tp_model() -> TargetPlatformModel:
     (for tests, experiments, etc.), use this method implementation as a test-case, i.e., override the
     'get_op_quantization_configs' method and use its output to call 'generate_tp_model' with your configurations.
 
-    Returns: A TargetPlatformModel object.
+    Returns: A TargetPlatformCapabilities object.
 
     """
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
@@ -46,7 +46,7 @@ def get_tp_model() -> TargetPlatformModel:
 
 def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantizationConfig], OpQuantizationConfig]:
     """
-    Creates a default configuration object for 8-bit quantization, to be used to set a default TargetPlatformModel.
+    Creates a default configuration object for 8-bit quantization, to be used to set a default TargetPlatformCapabilities.
     In addition, creates a default configuration objects list (with 8, 4 and 2 bit quantization) to be used as
     default configuration for mixed-precision quantization.
 
@@ -128,19 +128,19 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 def generate_tp_model(default_config: OpQuantizationConfig,
                       base_config: OpQuantizationConfig,
                       mixed_precision_cfg_list: List[OpQuantizationConfig],
-                      name: str) -> TargetPlatformModel:
+                      name: str) -> TargetPlatformCapabilities:
     """
-    Generates TargetPlatformModel with default defined Operators Sets, based on the given base configuration and
+    Generates TargetPlatformCapabilities with default defined Operators Sets, based on the given base configuration and
     mixed-precision configurations options list.
 
     Args
         default_config: A default OpQuantizationConfig to set as the TP model default configuration.
-        base_config: An OpQuantizationConfig to set as the TargetPlatformModel base configuration for mixed-precision purposes only.
+        base_config: An OpQuantizationConfig to set as the TargetPlatformCapabilities base configuration for mixed-precision purposes only.
         mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TP model mixed-precision
             quantization configuration options.
-        name: The name of the TargetPlatformModel.
+        name: The name of the TargetPlatformCapabilities.
 
-    Returns: A TargetPlatformModel object.
+    Returns: A TargetPlatformCapabilities object.
 
     """
     # Create a QuantizationConfigOptions, which defines a set
@@ -263,10 +263,10 @@ def generate_tp_model(default_config: OpQuantizationConfig,
     fusing_patterns.append(schema.Fusing(operator_groups=(fc, activations_after_fc_to_fuse)))
     fusing_patterns.append(schema.Fusing(operator_groups=(any_binary, any_relu)))
 
-    # Create a TargetPlatformModel and set its default quantization config.
+    # Create a TargetPlatformCapabilities and set its default quantization config.
     # This default configuration will be used for all operations
     # unless specified otherwise (see OperatorsSet, for example):
-    generated_tpm = schema.TargetPlatformModel(
+    generated_tpm = schema.TargetPlatformCapabilities(
         default_qco=default_configuration_options,
         tpc_minor_version=2,
         tpc_patch_version=0,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v2/tp_model.py b/tests/common_tests/helpers/tpcs_for_tests/v2/tp_model.py
index 5f8d7f85a..1f2abc67b 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v2/tp_model.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v2/tp_model.py
@@ -19,14 +19,14 @@
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
     IMX500_TP_MODEL
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, \
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
 tp = mct.target_platform
 
 
-def get_tp_model() -> TargetPlatformModel:
+def get_tp_model() -> TargetPlatformCapabilities:
     """
     A method that generates a default target platform model, with base 8-bit quantization configuration and 8, 4, 2
     bits configuration list for mixed-precision quantization.
@@ -35,7 +35,7 @@ def get_tp_model() -> TargetPlatformModel:
     'get_op_quantization_configs' method and use its output to call 'generate_tp_model' with your configurations.
     This version enables metadata by default.
 
-    Returns: A TargetPlatformModel object.
+    Returns: A TargetPlatformCapabilities object.
 
     """
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
@@ -48,7 +48,7 @@ def get_tp_model() -> TargetPlatformModel:
 def get_op_quantization_configs() -> \
         Tuple[OpQuantizationConfig, List[OpQuantizationConfig], OpQuantizationConfig]:
     """
-    Creates a default configuration object for 8-bit quantization, to be used to set a default TargetPlatformModel.
+    Creates a default configuration object for 8-bit quantization, to be used to set a default TargetPlatformCapabilities.
     In addition, creates a default configuration objects list (with 8, 4 and 2 bit quantization) to be used as
     default configuration for mixed-precision quantization.
 
@@ -137,19 +137,19 @@ def get_op_quantization_configs() -> \
 def generate_tp_model(default_config: OpQuantizationConfig,
                       base_config: OpQuantizationConfig,
                       mixed_precision_cfg_list: List[OpQuantizationConfig],
-                      name: str) -> TargetPlatformModel:
+                      name: str) -> TargetPlatformCapabilities:
     """
-    Generates TargetPlatformModel with default defined Operators Sets, based on the given base configuration and
+    Generates TargetPlatformCapabilities with default defined Operators Sets, based on the given base configuration and
     mixed-precision configurations options list.
 
     Args
         default_config: A default OpQuantizationConfig to set as the TP model default configuration.
-        base_config: An OpQuantizationConfig to set as the TargetPlatformModel base configuration for mixed-precision purposes only.
+        base_config: An OpQuantizationConfig to set as the TargetPlatformCapabilities base configuration for mixed-precision purposes only.
         mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TP model mixed-precision
             quantization configuration options.
-        name: The name of the TargetPlatformModel.
+        name: The name of the TargetPlatformCapabilities.
 
-    Returns: A TargetPlatformModel object.
+    Returns: A TargetPlatformCapabilities object.
 
     """
     # Create a QuantizationConfigOptions, which defines a set
@@ -246,10 +246,10 @@ def generate_tp_model(default_config: OpQuantizationConfig,
     fusing_patterns.append(schema.Fusing(operator_groups=(fc, activations_after_fc_to_fuse)))
     fusing_patterns.append(schema.Fusing(operator_groups=(any_binary, any_relu)))
 
-    # Create a TargetPlatformModel and set its default quantization config.
+    # Create a TargetPlatformCapabilities and set its default quantization config.
     # This default configuration will be used for all operations
     # unless specified otherwise (see OperatorsSet, for example):
-    generated_tpm = schema.TargetPlatformModel(
+    generated_tpm = schema.TargetPlatformCapabilities(
         default_qco=default_configuration_options,
         tpc_minor_version=2,
         tpc_patch_version=0,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tp_model.py b/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tp_model.py
index f6a666a20..d951c6da8 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tp_model.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tp_model.py
@@ -19,14 +19,14 @@
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
     WEIGHTS_QUANTIZATION_METHOD, IMX500_TP_MODEL
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, \
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
 tp = mct.target_platform
 
 
-def get_tp_model() -> TargetPlatformModel:
+def get_tp_model() -> TargetPlatformCapabilities:
     """
     A method that generates a default target platform model, with base 8-bit quantization configuration and 8, 4, 2
     bits configuration list for mixed-precision quantization.
@@ -35,7 +35,7 @@ def get_tp_model() -> TargetPlatformModel:
     'get_op_quantization_configs' method and use its output to call 'generate_tp_model' with your configurations.
     This version enables metadata by default.
 
-    Returns: A TargetPlatformModel object.
+    Returns: A TargetPlatformCapabilities object.
 
     """
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
@@ -48,7 +48,7 @@ def get_tp_model() -> TargetPlatformModel:
 def get_op_quantization_configs() -> \
         Tuple[OpQuantizationConfig, List[OpQuantizationConfig], OpQuantizationConfig]:
     """
-    Creates a default configuration object for 8-bit quantization, to be used to set a default TargetPlatformModel.
+    Creates a default configuration object for 8-bit quantization, to be used to set a default TargetPlatformCapabilities.
     In addition, creates a default configuration objects list (with 8, 4 and 2 bit quantization) to be used as
     default configuration for mixed-precision quantization with non-uniform quantizer for 2 and 4 bit candidates.
 
@@ -134,19 +134,19 @@ def get_op_quantization_configs() -> \
 def generate_tp_model(default_config: OpQuantizationConfig,
                       base_config: OpQuantizationConfig,
                       mixed_precision_cfg_list: List[OpQuantizationConfig],
-                      name: str) -> TargetPlatformModel:
+                      name: str) -> TargetPlatformCapabilities:
     """
-    Generates TargetPlatformModel with default defined Operators Sets, based on the given base configuration and
+    Generates TargetPlatformCapabilities with default defined Operators Sets, based on the given base configuration and
     mixed-precision configurations options list.
 
     Args
         default_config: A default OpQuantizationConfig to set as the TP model default configuration.
-        base_config: An OpQuantizationConfig to set as the TargetPlatformModel base configuration for mixed-precision purposes only.
+        base_config: An OpQuantizationConfig to set as the TargetPlatformCapabilities base configuration for mixed-precision purposes only.
         mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TP model mixed-precision
             quantization configuration options.
-        name: The name of the TargetPlatformModel.
+        name: The name of the TargetPlatformCapabilities.
 
-    Returns: A TargetPlatformModel object.
+    Returns: A TargetPlatformCapabilities object.
 
     """
     # Create a QuantizationConfigOptions, which defines a set
@@ -269,10 +269,10 @@ def generate_tp_model(default_config: OpQuantizationConfig,
     fusing_patterns.append(schema.Fusing(operator_groups=(fc, activations_after_fc_to_fuse)))
     fusing_patterns.append(schema.Fusing(operator_groups=(any_binary, any_relu)))
 
-    # Create a TargetPlatformModel and set its default quantization config.
+    # Create a TargetPlatformCapabilities and set its default quantization config.
     # This default configuration will be used for all operations
     # unless specified otherwise (see OperatorsSet, for example):
-    generated_tpm = schema.TargetPlatformModel(
+    generated_tpm = schema.TargetPlatformCapabilities(
         default_qco=default_configuration_options,
         tpc_minor_version=2,
         tpc_patch_version=0,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v3/tp_model.py b/tests/common_tests/helpers/tpcs_for_tests/v3/tp_model.py
index 0c30b8b34..69a19f8be 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v3/tp_model.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v3/tp_model.py
@@ -19,14 +19,14 @@
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
     IMX500_TP_MODEL
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, \
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
 tp = mct.target_platform
 
 
-def get_tp_model() -> TargetPlatformModel:
+def get_tp_model() -> TargetPlatformCapabilities:
     """
     A method that generates a default target platform model, with base 8-bit quantization configuration and 8, 4, 2
     bits configuration list for mixed-precision quantization.
@@ -35,7 +35,7 @@ def get_tp_model() -> TargetPlatformModel:
     'get_op_quantization_configs' method and use its output to call 'generate_tp_model' with your configurations.
     This version enables metadata by default.
 
-    Returns: A TargetPlatformModel object.
+    Returns: A TargetPlatformCapabilities object.
 
     """
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
@@ -48,7 +48,7 @@ def get_tp_model() -> TargetPlatformModel:
 def get_op_quantization_configs() -> \
         Tuple[OpQuantizationConfig, List[OpQuantizationConfig], OpQuantizationConfig]:
     """
-    Creates a default configuration object for 8-bit quantization, to be used to set a default TargetPlatformModel.
+    Creates a default configuration object for 8-bit quantization, to be used to set a default TargetPlatformCapabilities.
     In addition, creates a default configuration objects list (with 8, 4 and 2 bit quantization) to be used as
     default configuration for mixed-precision quantization.
 
@@ -137,19 +137,19 @@ def get_op_quantization_configs() -> \
 def generate_tp_model(default_config: OpQuantizationConfig,
                       base_config: OpQuantizationConfig,
                       mixed_precision_cfg_list: List[OpQuantizationConfig],
-                      name: str) -> TargetPlatformModel:
+                      name: str) -> TargetPlatformCapabilities:
     """
-    Generates TargetPlatformModel with default defined Operators Sets, based on the given base configuration and
+    Generates TargetPlatformCapabilities with default defined Operators Sets, based on the given base configuration and
     mixed-precision configurations options list.
 
     Args
         default_config: A default OpQuantizationConfig to set as the TP model default configuration.
-        base_config: An OpQuantizationConfig to set as the TargetPlatformModel base configuration for mixed-precision purposes only.
+        base_config: An OpQuantizationConfig to set as the TargetPlatformCapabilities base configuration for mixed-precision purposes only.
         mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TP model mixed-precision
             quantization configuration options.
-        name: The name of the TargetPlatformModel.
+        name: The name of the TargetPlatformCapabilities.
 
-    Returns: A TargetPlatformModel object.
+    Returns: A TargetPlatformCapabilities object.
 
     """
     # Create a QuantizationConfigOptions, which defines a set
@@ -276,10 +276,10 @@ def generate_tp_model(default_config: OpQuantizationConfig,
     fusing_patterns.append(schema.Fusing(operator_groups=(fc, activations_after_fc_to_fuse)))
     fusing_patterns.append(schema.Fusing(operator_groups=(any_binary, any_relu)))
 
-    # Create a TargetPlatformModel and set its default quantization config.
+    # Create a TargetPlatformCapabilities and set its default quantization config.
     # This default configuration will be used for all operations
     # unless specified otherwise (see OperatorsSet, for example):
-    generated_tpm = schema.TargetPlatformModel(
+    generated_tpm = schema.TargetPlatformCapabilities(
         default_qco=default_configuration_options,
         tpc_minor_version=3,
         tpc_patch_version=0,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tp_model.py b/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tp_model.py
index e5bfdb8f6..20de769e7 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tp_model.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tp_model.py
@@ -19,14 +19,14 @@
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
     WEIGHTS_QUANTIZATION_METHOD, IMX500_TP_MODEL
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, \
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
 tp = mct.target_platform
 
 
-def get_tp_model() -> TargetPlatformModel:
+def get_tp_model() -> TargetPlatformCapabilities:
     """
     A method that generates a default target platform model, with base 8-bit quantization configuration and 8, 4, 2
     bits configuration list for mixed-precision quantization.
@@ -35,7 +35,7 @@ def get_tp_model() -> TargetPlatformModel:
     'get_op_quantization_configs' method and use its output to call 'generate_tp_model' with your configurations.
     This version enables metadata by default.
 
-    Returns: A TargetPlatformModel object.
+    Returns: A TargetPlatformCapabilities object.
 
     """
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
@@ -48,7 +48,7 @@ def get_tp_model() -> TargetPlatformModel:
 def get_op_quantization_configs() -> \
         Tuple[OpQuantizationConfig, List[OpQuantizationConfig], OpQuantizationConfig]:
     """
-    Creates a default configuration object for 8-bit quantization, to be used to set a default TargetPlatformModel.
+    Creates a default configuration object for 8-bit quantization, to be used to set a default TargetPlatformCapabilities.
     In addition, creates a default configuration objects list (with 8, 4 and 2 bit quantization) to be used as
     default configuration for mixed-precision quantization with non-uniform quantizer for 2 and 4 bit candidates.
 
@@ -134,19 +134,19 @@ def get_op_quantization_configs() -> \
 def generate_tp_model(default_config: OpQuantizationConfig,
                       base_config: OpQuantizationConfig,
                       mixed_precision_cfg_list: List[OpQuantizationConfig],
-                      name: str) -> TargetPlatformModel:
+                      name: str) -> TargetPlatformCapabilities:
     """
-    Generates TargetPlatformModel with default defined Operators Sets, based on the given base configuration and
+    Generates TargetPlatformCapabilities with default defined Operators Sets, based on the given base configuration and
     mixed-precision configurations options list.
 
     Args
         default_config: A default OpQuantizationConfig to set as the TP model default configuration.
-        base_config: An OpQuantizationConfig to set as the TargetPlatformModel base configuration for mixed-precision purposes only.
+        base_config: An OpQuantizationConfig to set as the TargetPlatformCapabilities base configuration for mixed-precision purposes only.
         mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TP model mixed-precision
             quantization configuration options.
-        name: The name of the TargetPlatformModel.
+        name: The name of the TargetPlatformCapabilities.
 
-    Returns: A TargetPlatformModel object.
+    Returns: A TargetPlatformCapabilities object.
 
     """
     # Create a QuantizationConfigOptions, which defines a set
@@ -301,10 +301,10 @@ def generate_tp_model(default_config: OpQuantizationConfig,
     fusing_patterns.append(schema.Fusing(operator_groups=(fc, activations_after_fc_to_fuse)))
     fusing_patterns.append(schema.Fusing(operator_groups=(any_binary, any_relu)))
 
-    # Create a TargetPlatformModel and set its default quantization config.
+    # Create a TargetPlatformCapabilities and set its default quantization config.
     # This default configuration will be used for all operations
     # unless specified otherwise (see OperatorsSet, for example):
-    generated_tpm = schema.TargetPlatformModel(
+    generated_tpm = schema.TargetPlatformCapabilities(
         default_qco=default_configuration_options,
         tpc_minor_version=3,
         tpc_patch_version=0,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v4/tp_model.py b/tests/common_tests/helpers/tpcs_for_tests/v4/tp_model.py
index 0a32a5c1e..7389aecf6 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v4/tp_model.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v4/tp_model.py
@@ -19,13 +19,13 @@
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
     IMX500_TP_MODEL
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, \
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
 tp = mct.target_platform
 
-def get_tp_model() -> TargetPlatformModel:
+def get_tp_model() -> TargetPlatformCapabilities:
     """
     A method that generates a default target platform model, with base 8-bit quantization configuration and 8, 4, 2
     bits configuration list for mixed-precision quantization.
@@ -34,7 +34,7 @@ def get_tp_model() -> TargetPlatformModel:
     'get_op_quantization_configs' method and use its output to call 'generate_tp_model' with your configurations.
     This version enables metadata by default.
 
-    Returns: A TargetPlatformModel object.
+    Returns: A TargetPlatformCapabilities object.
 
     """
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
@@ -47,7 +47,7 @@ def get_tp_model() -> TargetPlatformModel:
 def get_op_quantization_configs() -> \
         Tuple[OpQuantizationConfig, List[OpQuantizationConfig], OpQuantizationConfig]:
     """
-    Creates a default configuration object for 8-bit quantization, to be used to set a default TargetPlatformModel.
+    Creates a default configuration object for 8-bit quantization, to be used to set a default TargetPlatformCapabilities.
     In addition, creates a default configuration objects list (with 8, 4 and 2 bit quantization) to be used as
     default configuration for mixed-precision quantization.
 
@@ -136,19 +136,19 @@ def get_op_quantization_configs() -> \
 def generate_tp_model(default_config: OpQuantizationConfig,
                       base_config: OpQuantizationConfig,
                       mixed_precision_cfg_list: List[OpQuantizationConfig],
-                      name: str) -> TargetPlatformModel:
+                      name: str) -> TargetPlatformCapabilities:
     """
-    Generates TargetPlatformModel with default defined Operators Sets, based on the given base configuration and
+    Generates TargetPlatformCapabilities with default defined Operators Sets, based on the given base configuration and
     mixed-precision configurations options list.
 
     Args
         default_config: A default OpQuantizationConfig to set as the TP model default configuration.
-        base_config: An OpQuantizationConfig to set as the TargetPlatformModel base configuration for mixed-precision purposes only.
+        base_config: An OpQuantizationConfig to set as the TargetPlatformCapabilities base configuration for mixed-precision purposes only.
         mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TP model mixed-precision
             quantization configuration options.
-        name: The name of the TargetPlatformModel.
+        name: The name of the TargetPlatformCapabilities.
 
-    Returns: A TargetPlatformModel object.
+    Returns: A TargetPlatformCapabilities object.
 
     """
     # Create a QuantizationConfigOptions, which defines a set
@@ -310,10 +310,10 @@ def generate_tp_model(default_config: OpQuantizationConfig,
     fusing_patterns.append(schema.Fusing(operator_groups=(fc, activations_after_fc_to_fuse)))
     fusing_patterns.append(schema.Fusing(operator_groups=(any_binary, any_relu)))
 
-    # Create a TargetPlatformModel and set its default quantization config.
+    # Create a TargetPlatformCapabilities and set its default quantization config.
     # This default configuration will be used for all operations
     # unless specified otherwise (see OperatorsSet, for example):
-    generated_tpm = schema.TargetPlatformModel(
+    generated_tpm = schema.TargetPlatformCapabilities(
         default_qco=default_configuration_options,
         tpc_minor_version=4,
         tpc_patch_version=0,
diff --git a/tests/common_tests/test_tp_model.py b/tests/common_tests/test_tp_model.py
index fb37a693c..2e3670a93 100644
--- a/tests/common_tests/test_tp_model.py
+++ b/tests/common_tests/test_tp_model.py
@@ -45,14 +45,14 @@ def setUp(self):
         op2 = schema.OperatorsSet(name="opset2")
         op3 = schema.OperatorsSet(name="opset3")
         op12 = schema.OperatorSetGroup(operators_set=[op1, op2])
-        self.tp_model = schema.TargetPlatformModel(default_qco=TEST_QCO,
-                                                   operator_set=(op1, op2, op3),
-                                                   fusing_patterns=(schema.Fusing(operator_groups=(op12, op3)),
+        self.tp_model = schema.TargetPlatformCapabilities(default_qco=TEST_QCO,
+                                                          operator_set=(op1, op2, op3),
+                                                          fusing_patterns=(schema.Fusing(operator_groups=(op12, op3)),
                                                                     schema.Fusing(operator_groups=(op1, op2))),
-                                                   tpc_minor_version=1,
-                                                   tpc_patch_version=0,
-                                                   tpc_platform_type="dump_to_json",
-                                                   add_metadata=False)
+                                                          tpc_minor_version=1,
+                                                          tpc_patch_version=0,
+                                                          tpc_platform_type="dump_to_json",
+                                                          add_metadata=False)
 
         # Create invalid JSON file
         with open(self.invalid_json_file, "w") as file:
@@ -65,7 +65,7 @@ def tearDown(self):
                 os.remove(file)
 
     def test_valid_model_object(self):
-        """Test that a valid TargetPlatformModel object is returned unchanged."""
+        """Test that a valid TargetPlatformCapabilities object is returned unchanged."""
         result = load_target_platform_model(self.tp_model)
         self.assertEqual(self.tp_model, result)
 
@@ -73,7 +73,7 @@ def test_invalid_json_parsing(self):
         """Test that invalid JSON content raises a ValueError."""
         with self.assertRaises(ValueError) as context:
             load_target_platform_model(self.invalid_json_file)
-        self.assertIn("Invalid JSON for loading TargetPlatformModel in", str(context.exception))
+        self.assertIn("Invalid JSON for loading TargetPlatformCapabilities in", str(context.exception))
 
     def test_nonexistent_file(self):
         """Test that a nonexistent file raises FileNotFoundError."""
@@ -95,13 +95,13 @@ def test_non_json_extension(self):
 
     def test_invalid_input_type(self):
         """Test that an unsupported input type raises TypeError."""
-        invalid_input = 123  # Not a string or TargetPlatformModel
+        invalid_input = 123  # Not a string or TargetPlatformCapabilities
         with self.assertRaises(TypeError) as context:
             load_target_platform_model(invalid_input)
-        self.assertIn("must be either a TargetPlatformModel instance or a string path", str(context.exception))
+        self.assertIn("must be either a TargetPlatformCapabilities instance or a string path", str(context.exception))
 
     def test_valid_export(self):
-        """Test exporting a valid TargetPlatformModel instance to a file."""
+        """Test exporting a valid TargetPlatformCapabilities instance to a file."""
         export_target_platform_model(self.tp_model, self.valid_export_path)
         # Verify the file exists
         self.assertTrue(os.path.exists(self.valid_export_path))
@@ -115,7 +115,7 @@ def test_export_with_invalid_model(self):
         """Test that exporting an invalid model raises a ValueError."""
         with self.assertRaises(ValueError) as context:
             export_target_platform_model("not_a_model", self.valid_export_path)
-        self.assertIn("not a valid TargetPlatformModel instance", str(context.exception))
+        self.assertIn("not a valid TargetPlatformCapabilities instance", str(context.exception))
 
     def test_export_with_invalid_path(self):
         """Test that exporting to an invalid path raises an OSError."""
@@ -154,34 +154,34 @@ class TargetPlatformModelingTest(unittest.TestCase):
     def test_immutable_tp(self):
 
         with self.assertRaises(Exception) as e:
-            model = schema.TargetPlatformModel(default_qco=TEST_QCO,
-                                               operator_set=tuple([schema.OperatorsSet(name="opset")]),
-                                               tpc_minor_version=None,
-                                               tpc_patch_version=None,
-                                               tpc_platform_type=None,
-                                               add_metadata=False)
+            model = schema.TargetPlatformCapabilities(default_qco=TEST_QCO,
+                                                      operator_set=tuple([schema.OperatorsSet(name="opset")]),
+                                                      tpc_minor_version=None,
+                                                      tpc_patch_version=None,
+                                                      tpc_platform_type=None,
+                                                      add_metadata=False)
             model.operator_set = tuple()
-        self.assertEqual('"TargetPlatformModel" is immutable and does not support item assignment', str(e.exception))
+        self.assertEqual('"TargetPlatformCapabilities" is immutable and does not support item assignment', str(e.exception))
 
     def test_default_options_more_than_single_qc(self):
         test_qco = schema.QuantizationConfigOptions(quantization_configurations=tuple([TEST_QC, TEST_QC]), base_config=TEST_QC)
         with self.assertRaises(Exception) as e:
-            schema.TargetPlatformModel(default_qco=test_qco,
-                                       tpc_minor_version=None,
-                                       tpc_patch_version=None,
-                                       tpc_platform_type=None,
-                                       add_metadata=False)
+            schema.TargetPlatformCapabilities(default_qco=test_qco,
+                                              tpc_minor_version=None,
+                                              tpc_patch_version=None,
+                                              tpc_platform_type=None,
+                                              add_metadata=False)
         self.assertEqual('Default QuantizationConfigOptions must contain exactly one option.', str(e.exception))
 
     def test_tp_model_show(self):
-        tpm = schema.TargetPlatformModel(default_qco=TEST_QCO,
-                                         tpc_minor_version=None,
-                                         tpc_patch_version=None,
-                                         tpc_platform_type=None,
-                                         operator_set=tuple([schema.OperatorsSet(name="opA"), schema.OperatorsSet(name="opB")]),
-                                         fusing_patterns=tuple(
+        tpm = schema.TargetPlatformCapabilities(default_qco=TEST_QCO,
+                                                tpc_minor_version=None,
+                                                tpc_patch_version=None,
+                                                tpc_platform_type=None,
+                                                operator_set=tuple([schema.OperatorsSet(name="opA"), schema.OperatorsSet(name="opB")]),
+                                                fusing_patterns=tuple(
                                              [schema.Fusing(operator_groups=(schema.OperatorsSet(name="opA"), schema.OperatorsSet(name="opB")))]),
-                                         add_metadata=False)
+                                                add_metadata=False)
         tpm.show()
 
 class OpsetTest(unittest.TestCase):
@@ -190,13 +190,13 @@ def test_opset_qco(self):
         opset_name = "ops_3bit"
         qco_3bit = TEST_QCO.clone_and_edit(activation_n_bits=3)
         operator_set = [schema.OperatorsSet(name=opset_name, qc_options=qco_3bit)]
-        hm = schema.TargetPlatformModel(default_qco=TEST_QCO,
-                                        operator_set=tuple(operator_set),
-                                        tpc_minor_version=None,
-                                        tpc_patch_version=None,
-                                        tpc_platform_type=None,
-                                        add_metadata=False,
-                                        name='test')
+        hm = schema.TargetPlatformCapabilities(default_qco=TEST_QCO,
+                                               operator_set=tuple(operator_set),
+                                               tpc_minor_version=None,
+                                               tpc_patch_version=None,
+                                               tpc_platform_type=None,
+                                               add_metadata=False,
+                                               name='test')
         for op_qc in get_config_options_by_operators_set(hm, opset_name).quantization_configurations:
             self.assertEqual(op_qc.activation_n_bits, 3)
 
@@ -214,19 +214,19 @@ def test_opset_concat(self):
                                 qc_options=TEST_QCO.clone_and_edit(activation_n_bits=2))
         c = schema.OperatorsSet(name='opset_C')  # Just add it without using it in concat
         operator_set.extend([a, b, c])
-        hm = schema.TargetPlatformModel(default_qco=TEST_QCO,
-                                        operator_set=tuple(operator_set),
-                                        tpc_minor_version=None,
-                                        tpc_patch_version=None,
-                                        tpc_platform_type=None,
-                                        add_metadata=False,
-                                        name='test')
+        hm = schema.TargetPlatformCapabilities(default_qco=TEST_QCO,
+                                               operator_set=tuple(operator_set),
+                                               tpc_minor_version=None,
+                                               tpc_patch_version=None,
+                                               tpc_platform_type=None,
+                                               add_metadata=False,
+                                               name='test')
         self.assertEqual(len(hm.operator_set), 3)
         self.assertFalse(is_opset_in_model(hm, "opset_A_opset_B"))
 
     def test_non_unique_opset(self):
         with self.assertRaises(Exception) as e:
-            hm = schema.TargetPlatformModel(
+            hm = schema.TargetPlatformCapabilities(
                 default_qco=schema.QuantizationConfigOptions(quantization_configurations=tuple([TEST_QC])),
                 operator_set=tuple([schema.OperatorsSet(name="conv"), schema.OperatorsSet(name="conv")]),
                 tpc_minor_version=None,
@@ -282,7 +282,7 @@ class FusingTest(unittest.TestCase):
     def test_fusing_single_opset(self):
         add = schema.OperatorsSet(name="add")
         with self.assertRaises(Exception) as e:
-            hm = schema.TargetPlatformModel(
+            hm = schema.TargetPlatformCapabilities(
                 default_qco=schema.QuantizationConfigOptions(quantization_configurations=tuple([TEST_QC])),
                 operator_set=tuple([add]),
                 fusing_patterns=tuple([schema.Fusing(operator_groups=tuple([add]))]),
@@ -304,7 +304,7 @@ def test_fusing_contains(self):
         fusing_patterns.append(schema.Fusing(operator_groups=(conv, add)))
         fusing_patterns.append(schema.Fusing(operator_groups=(conv, add, tanh)))
 
-        hm = schema.TargetPlatformModel(
+        hm = schema.TargetPlatformCapabilities(
             default_qco=schema.QuantizationConfigOptions(quantization_configurations=tuple([TEST_QC])),
             operator_set=tuple(operator_set),
             fusing_patterns=tuple(fusing_patterns),
@@ -332,7 +332,7 @@ def test_fusing_contains_with_opset_concat(self):
         fusing_patterns.append(schema.Fusing(operator_groups=(conv, add_tanh)))
         fusing_patterns.append(schema.Fusing(operator_groups=(conv, add, tanh)))
 
-        hm = schema.TargetPlatformModel(
+        hm = schema.TargetPlatformCapabilities(
             default_qco=schema.QuantizationConfigOptions(quantization_configurations=tuple([TEST_QC])),
             operator_set=tuple(operator_set),
             fusing_patterns=tuple(fusing_patterns),
diff --git a/tests/keras_tests/exporter_tests/tflite_int8/imx500_int8_tp_model.py b/tests/keras_tests/exporter_tests/tflite_int8/imx500_int8_tp_model.py
index f7969ea51..83e346971 100644
--- a/tests/keras_tests/exporter_tests/tflite_int8/imx500_int8_tp_model.py
+++ b/tests/keras_tests/exporter_tests/tflite_int8/imx500_int8_tp_model.py
@@ -33,13 +33,13 @@
         Conv2DTranspose
 
 import model_compression_toolkit as mct
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, OpQuantizationConfig
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, OpQuantizationConfig
 from tests.common_tests.helpers.tpcs_for_tests.v1.tp_model import generate_tp_model
 
 tp = mct.target_platform
 
 
-def get_tp_model(edit_weights_params_dict, edit_act_params_dict) -> TargetPlatformModel:
+def get_tp_model(edit_weights_params_dict, edit_act_params_dict) -> TargetPlatformCapabilities:
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
 
     updated_config = base_config.clone_and_edit(attr_to_edit={KERNEL_ATTR: edit_weights_params_dict},
@@ -63,12 +63,12 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     return eight_bits, mixed_precision_cfg_list, default_config
 
 
-def get_int8_tpc(edit_weights_params_dict={}, edit_act_params_dict={}) -> tp.TargetPlatformModel:
+def get_int8_tpc(edit_weights_params_dict={}, edit_act_params_dict={}) -> tp.TargetPlatformCapabilities:
     default_tp_model = get_tp_model(edit_weights_params_dict, edit_act_params_dict)
     return default_tp_model
 
 
-def generate_keras_tpc(name: str, tp_model: schema.TargetPlatformModel):
+def generate_keras_tpc(name: str, tp_model: schema.TargetPlatformCapabilities):
     keras_tpc = tp.FrameworkQuantizationCapabilities(tp_model)
 
     with keras_tpc:
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/bn_attributes_quantization_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/bn_attributes_quantization_test.py
index 52b957f2d..78ddc0846 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/bn_attributes_quantization_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/bn_attributes_quantization_test.py
@@ -82,7 +82,7 @@ def _generate_bn_quantized_tpm(quantize_linear):
     linear_configuration_options = schema.QuantizationConfigOptions(quantization_configurations=tuple([linear_op_qc]))
     bn_configuration_options = schema.QuantizationConfigOptions(quantization_configurations=tuple([bn_op_qc]))
 
-    generated_tpm = schema.TargetPlatformModel(
+    generated_tpm = schema.TargetPlatformCapabilities(
         default_qco=default_configuration_options,
         tpc_minor_version=None,
         tpc_patch_version=None,
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision_tests.py b/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision_tests.py
index 9d9f4de4b..4b7c41b0e 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision_tests.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision_tests.py
@@ -676,7 +676,7 @@ def get_tpc(self):
             base_config=cfg,
         )
 
-        tp_model = schema.TargetPlatformModel(
+        tp_model = schema.TargetPlatformCapabilities(
             default_qco=schema.QuantizationConfigOptions(quantization_configurations=tuple([cfg]), base_config=cfg),
             tpc_minor_version=None,
             tpc_patch_version=None,
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py b/tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py
index 7854b6f99..9839afe3f 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py
@@ -199,7 +199,7 @@ def get_tpc(self):
             base_config=two_bit_cfg,
         )
 
-        tp_model = schema.TargetPlatformModel(
+        tp_model = schema.TargetPlatformCapabilities(
             default_qco=weight_fixed_cfg,
             tpc_minor_version=None,
             tpc_patch_version=None,
@@ -522,7 +522,7 @@ def get_tpc(self):
             base_config=cfg,
         )
 
-        tp_model = schema.TargetPlatformModel(
+        tp_model = schema.TargetPlatformCapabilities(
             default_qco=schema.QuantizationConfigOptions(quantization_configurations=tuple([cfg]), base_config=cfg),
             tpc_minor_version=None,
             tpc_patch_version=None,
diff --git a/tests/keras_tests/function_tests/test_custom_layer.py b/tests/keras_tests/function_tests/test_custom_layer.py
index c7fa6fba8..267e7122f 100644
--- a/tests/keras_tests/function_tests/test_custom_layer.py
+++ b/tests/keras_tests/function_tests/test_custom_layer.py
@@ -85,12 +85,12 @@ def get_tpc():
                                         qc_options=default_configuration_options.clone_and_edit(
                                             enable_activation_quantization=False)
                                         .clone_and_edit_weight_attribute(enable_weights_quantization=False))]
-    tp_model = schema.TargetPlatformModel(default_qco=default_configuration_options,
-                                          operator_set=tuple(operator_set),
-                                          tpc_minor_version=None,
-                                          tpc_patch_version=None,
-                                          tpc_platform_type=None,
-                                          add_metadata=False)
+    tp_model = schema.TargetPlatformCapabilities(default_qco=default_configuration_options,
+                                                 operator_set=tuple(operator_set),
+                                                 tpc_minor_version=None,
+                                                 tpc_patch_version=None,
+                                                 tpc_platform_type=None,
+                                                 add_metadata=False)
 
     return tp_model
 
diff --git a/tests/keras_tests/function_tests/test_hmse_error_method.py b/tests/keras_tests/function_tests/test_hmse_error_method.py
index 39558f643..2a4de3251 100644
--- a/tests/keras_tests/function_tests/test_hmse_error_method.py
+++ b/tests/keras_tests/function_tests/test_hmse_error_method.py
@@ -190,14 +190,14 @@ def _generate_bn_quantization_tpc(quant_method, per_channel):
                                              {GAMMA: AttributeQuantizationConfig(weights_n_bits=8,
                                                                                  enable_weights_quantization=True)})
 
-            tp_model = schema.TargetPlatformModel(default_qco=conv_qco,
-                                                  tpc_minor_version=None,
-                                                  tpc_patch_version=None,
-                                                  tpc_platform_type=None,
-                                                  operator_set=tuple(
+            tp_model = schema.TargetPlatformCapabilities(default_qco=conv_qco,
+                                                         tpc_minor_version=None,
+                                                         tpc_patch_version=None,
+                                                         tpc_platform_type=None,
+                                                         operator_set=tuple(
                                                       [schema.OperatorsSet(name="Linear", qc_options=conv_qco),
                                                        schema.OperatorsSet(name="BN", qc_options=bn_qco)]),
-                                                  add_metadata=False)
+                                                         add_metadata=False)
 
             return tp_model
 
diff --git a/tests/keras_tests/function_tests/test_layer_fusing.py b/tests/keras_tests/function_tests/test_layer_fusing.py
index 9068c805b..db548b188 100644
--- a/tests/keras_tests/function_tests/test_layer_fusing.py
+++ b/tests/keras_tests/function_tests/test_layer_fusing.py
@@ -83,7 +83,7 @@ def generate_base_tpc(operator_set, fusing_patterns):
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
     default_configuration_options = schema.QuantizationConfigOptions(quantization_configurations=tuple(
         [default_config]))
-    generated_tp = schema.TargetPlatformModel(
+    generated_tp = schema.TargetPlatformCapabilities(
         default_qco=default_configuration_options,
         tpc_minor_version=None,
         tpc_patch_version=None,
diff --git a/tests/keras_tests/non_parallel_tests/test_keras_tp_model.py b/tests/keras_tests/non_parallel_tests/test_keras_tp_model.py
index 8037e3380..a141cf378 100644
--- a/tests/keras_tests/non_parallel_tests/test_keras_tp_model.py
+++ b/tests/keras_tests/non_parallel_tests/test_keras_tp_model.py
@@ -106,7 +106,7 @@ def test_keras_layers_with_params(self):
     def test_get_layers_by_op(self):
         op_obj = schema.OperatorsSet(name='opsetA')
 
-        hm = schema.TargetPlatformModel(
+        hm = schema.TargetPlatformCapabilities(
             default_qco=schema.QuantizationConfigOptions(quantization_configurations=tuple([TEST_QC])),
             tpc_minor_version=None,
             tpc_patch_version=None,
@@ -125,7 +125,7 @@ def test_get_layers_by_opconcat(self):
         op_obj_a = schema.OperatorsSet(name='opsetA')
         op_obj_b = schema.OperatorsSet(name='opsetB')
         op_concat = schema.OperatorSetGroup(operators_set=[op_obj_a, op_obj_b])
-        hm = schema.TargetPlatformModel(
+        hm = schema.TargetPlatformCapabilities(
             default_qco=schema.QuantizationConfigOptions(quantization_configurations=tuple([TEST_QC])),
             tpc_minor_version=None,
             tpc_patch_version=None,
@@ -143,7 +143,7 @@ def test_get_layers_by_opconcat(self):
         self.assertEqual(fw_tp.get_layers_by_opset(op_concat), opset_layers_a + opset_layers_b)
 
     def test_layer_attached_to_multiple_opsets(self):
-        hm = schema.TargetPlatformModel(
+        hm = schema.TargetPlatformCapabilities(
             default_qco=schema.QuantizationConfigOptions(quantization_configurations=tuple([TEST_QC])),
             tpc_minor_version=None,
             tpc_patch_version=None,
@@ -161,7 +161,7 @@ def test_layer_attached_to_multiple_opsets(self):
         self.assertEqual('Found layer Conv2D in more than one OperatorsSet', str(e.exception))
 
     def test_filter_layer_attached_to_multiple_opsets(self):
-        hm = schema.TargetPlatformModel(
+        hm = schema.TargetPlatformCapabilities(
             default_qco=schema.QuantizationConfigOptions(quantization_configurations=tuple([TEST_QC])),
             tpc_minor_version=None,
             tpc_patch_version=None,
@@ -192,13 +192,13 @@ def test_qco_by_keras_layer(self):
         operator_set.append(schema.OperatorsSet(name="tanh", qc_options=sevenbit_qco))
         operator_set.append(schema.OperatorsSet(name="relu"))
 
-        tpm = schema.TargetPlatformModel(default_qco=default_qco,
-                                         tpc_minor_version=None,
-                                         tpc_patch_version=None,
-                                         tpc_platform_type=None,
-                                         operator_set=tuple(operator_set),
-                                         add_metadata=False,
-                                         name='test')
+        tpm = schema.TargetPlatformCapabilities(default_qco=default_qco,
+                                                tpc_minor_version=None,
+                                                tpc_patch_version=None,
+                                                tpc_platform_type=None,
+                                                operator_set=tuple(operator_set),
+                                                add_metadata=False,
+                                                name='test')
 
         tpc_keras = tp.FrameworkQuantizationCapabilities(tpm)
         with tpc_keras:
@@ -228,7 +228,7 @@ def test_qco_by_keras_layer(self):
     # TODO: need to test as part of attach to fw tests
     # def test_opset_not_in_tp(self):
     #     default_qco = schema.QuantizationConfigOptions(quantization_configurations=tuple([TEST_QC]))
-    #     hm = schema.TargetPlatformModel(default_qco=default_qco,
+    #     hm = schema.TargetPlatformCapabilities(default_qco=default_qco,
     #                                     tpc_minor_version=None,
     #                                     tpc_patch_version=None,
     #                                     tpc_platform_type=None,
@@ -251,13 +251,13 @@ def test_keras_fusing_patterns(self):
         fusing_patterns = [schema.Fusing(operator_groups=(a, b, c)),
                            schema.Fusing(operator_groups=(a, c))]
 
-        hm = schema.TargetPlatformModel(default_qco=default_qco,
-                                        tpc_minor_version=None,
-                                        tpc_patch_version=None,
-                                        tpc_platform_type=None,
-                                        operator_set=tuple(operator_set),
-                                        fusing_patterns=tuple(fusing_patterns),
-                                        add_metadata=False)
+        hm = schema.TargetPlatformCapabilities(default_qco=default_qco,
+                                               tpc_minor_version=None,
+                                               tpc_patch_version=None,
+                                               tpc_platform_type=None,
+                                               operator_set=tuple(operator_set),
+                                               fusing_patterns=tuple(fusing_patterns),
+                                               add_metadata=False)
 
         hm_keras = tp.FrameworkQuantizationCapabilities(hm)
         with hm_keras:
@@ -280,12 +280,12 @@ def test_keras_fusing_patterns(self):
 
     def test_get_default_op_qc(self):
         default_qco = schema.QuantizationConfigOptions(quantization_configurations=tuple([TEST_QC]))
-        tpm = schema.TargetPlatformModel(default_qco=default_qco,
-                                         tpc_minor_version=None,
-                                         tpc_patch_version=None,
-                                         tpc_platform_type=None,
-                                         operator_set=tuple([schema.OperatorsSet(name="opA")]),
-                                         add_metadata=False)
+        tpm = schema.TargetPlatformCapabilities(default_qco=default_qco,
+                                                tpc_minor_version=None,
+                                                tpc_patch_version=None,
+                                                tpc_platform_type=None,
+                                                operator_set=tuple([schema.OperatorsSet(name="opA")]),
+                                                add_metadata=False)
 
         tpc = tp.FrameworkQuantizationCapabilities(tpm)
         with tpc:
diff --git a/tests/pytorch_tests/function_tests/layer_fusing_test.py b/tests/pytorch_tests/function_tests/layer_fusing_test.py
index 12c31892f..1b859ec12 100644
--- a/tests/pytorch_tests/function_tests/layer_fusing_test.py
+++ b/tests/pytorch_tests/function_tests/layer_fusing_test.py
@@ -76,13 +76,13 @@ def get_tpc(self):
         operator_set = [conv, any_relu]
         # Define fusions
         fusing_patterns = [schema.Fusing(operator_groups=(conv, any_relu))]
-        generated_tp = schema.TargetPlatformModel(default_qco=default_configuration_options,
-                                                  tpc_minor_version=None,
-                                                  tpc_patch_version=None,
-                                                  tpc_platform_type=None,
-                                                  operator_set=tuple(operator_set),
-                                                  fusing_patterns=tuple(fusing_patterns),
-                                                  name='layer_fusing_test')
+        generated_tp = schema.TargetPlatformCapabilities(default_qco=default_configuration_options,
+                                                         tpc_minor_version=None,
+                                                         tpc_patch_version=None,
+                                                         tpc_platform_type=None,
+                                                         operator_set=tuple(operator_set),
+                                                         fusing_patterns=tuple(fusing_patterns),
+                                                         name='layer_fusing_test')
 
         return generated_tp
 
@@ -124,13 +124,13 @@ def get_tpc(self):
         operator_set = [conv, any_act]
         # Define fusions
         fusing_patterns = [schema.Fusing(operator_groups=(conv, any_act))]
-        generated_tp = schema.TargetPlatformModel(default_qco=default_configuration_options,
-                                                  tpc_minor_version=None,
-                                                  tpc_patch_version=None,
-                                                  tpc_platform_type=None,
-                                                  operator_set=tuple(operator_set),
-                                                  fusing_patterns=tuple(fusing_patterns),
-                                                  name='layer_fusing_test')
+        generated_tp = schema.TargetPlatformCapabilities(default_qco=default_configuration_options,
+                                                         tpc_minor_version=None,
+                                                         tpc_patch_version=None,
+                                                         tpc_platform_type=None,
+                                                         operator_set=tuple(operator_set),
+                                                         fusing_patterns=tuple(fusing_patterns),
+                                                         name='layer_fusing_test')
 
         return generated_tp
 
@@ -186,13 +186,13 @@ def get_tpc(self):
         operator_set = [conv, any_act]
         # Define fusions
         fusing_patterns = [schema.Fusing(operator_groups=(conv, any_act))]
-        generated_tp = schema.TargetPlatformModel(default_qco=default_configuration_options,
-                                                  tpc_minor_version=None,
-                                                  tpc_patch_version=None,
-                                                  tpc_platform_type=None,
-                                                  operator_set=tuple(operator_set),
-                                                  fusing_patterns=tuple(fusing_patterns),
-                                                  name='layer_fusing_test')
+        generated_tp = schema.TargetPlatformCapabilities(default_qco=default_configuration_options,
+                                                         tpc_minor_version=None,
+                                                         tpc_patch_version=None,
+                                                         tpc_platform_type=None,
+                                                         operator_set=tuple(operator_set),
+                                                         fusing_patterns=tuple(fusing_patterns),
+                                                         name='layer_fusing_test')
         return generated_tp
 
     def run_test(self, seed=0):
@@ -255,13 +255,13 @@ def get_tpc(self):
                            schema.Fusing(operator_groups=(conv, activations_to_fuse, add)),
                            schema.Fusing(operator_groups=(fc, activations_to_fuse))]
 
-        generated_tp = schema.TargetPlatformModel(default_qco=default_configuration_options,
-                                                  tpc_minor_version=None,
-                                                  tpc_patch_version=None,
-                                                  tpc_platform_type=None,
-                                                  operator_set=tuple(operator_set),
-                                                  fusing_patterns=tuple(fusing_patterns),
-                                                  name='layer_fusing_test')
+        generated_tp = schema.TargetPlatformCapabilities(default_qco=default_configuration_options,
+                                                         tpc_minor_version=None,
+                                                         tpc_patch_version=None,
+                                                         tpc_platform_type=None,
+                                                         operator_set=tuple(operator_set),
+                                                         fusing_patterns=tuple(fusing_patterns),
+                                                         name='layer_fusing_test')
 
         return generated_tp
 
diff --git a/tests/pytorch_tests/function_tests/test_pytorch_tp_model.py b/tests/pytorch_tests/function_tests/test_pytorch_tp_model.py
index 584509704..1df69af38 100644
--- a/tests/pytorch_tests/function_tests/test_pytorch_tp_model.py
+++ b/tests/pytorch_tests/function_tests/test_pytorch_tp_model.py
@@ -105,13 +105,13 @@ def test_qco_by_pytorch_layer(self):
 
         operator_set.append(schema.OperatorsSet(name="avg_pool2d"))
 
-        tpm = schema.TargetPlatformModel(default_qco=default_qco,
-                                         tpc_minor_version=None,
-                                         tpc_patch_version=None,
-                                         tpc_platform_type=None,
-                                         operator_set=tuple(operator_set),
-                                         add_metadata=False,
-                                         name='test')
+        tpm = schema.TargetPlatformCapabilities(default_qco=default_qco,
+                                                tpc_minor_version=None,
+                                                tpc_patch_version=None,
+                                                tpc_platform_type=None,
+                                                operator_set=tuple(operator_set),
+                                                add_metadata=False,
+                                                name='test')
 
         tpc_pytorch = tp.FrameworkQuantizationCapabilities(tpm)
         with tpc_pytorch:
@@ -147,7 +147,7 @@ def test_qco_by_pytorch_layer(self):
     def test_get_layers_by_op(self):
         op_obj = schema.OperatorsSet(name='opsetA')
 
-        hm = schema.TargetPlatformModel(
+        hm = schema.TargetPlatformCapabilities(
             default_qco=schema.QuantizationConfigOptions(quantization_configurations=tuple([TEST_QC])),
             tpc_minor_version=None,
             tpc_patch_version=None,
@@ -167,7 +167,7 @@ def test_get_layers_by_opconcat(self):
         op_obj_b = schema.OperatorsSet(name='opsetB')
         op_concat = schema.OperatorSetGroup(operators_set=[op_obj_a, op_obj_b])
 
-        hm = schema.TargetPlatformModel(
+        hm = schema.TargetPlatformCapabilities(
             default_qco=schema.QuantizationConfigOptions(quantization_configurations=tuple([TEST_QC])),
             tpc_minor_version=None,
             tpc_patch_version=None,
@@ -185,7 +185,7 @@ def test_get_layers_by_opconcat(self):
         self.assertEqual(fw_tp.get_layers_by_opset(op_concat), opset_layers_a + opset_layers_b)
 
     def test_layer_attached_to_multiple_opsets(self):
-        hm = schema.TargetPlatformModel(
+        hm = schema.TargetPlatformCapabilities(
             default_qco=schema.QuantizationConfigOptions(quantization_configurations=tuple([TEST_QC])),
             tpc_minor_version=None,
             tpc_patch_version=None,
@@ -203,7 +203,7 @@ def test_layer_attached_to_multiple_opsets(self):
         self.assertEqual('Found layer Conv2d in more than one OperatorsSet', str(e.exception))
 
     def test_filter_layer_attached_to_multiple_opsets(self):
-        hm = schema.TargetPlatformModel(
+        hm = schema.TargetPlatformCapabilities(
             default_qco=schema.QuantizationConfigOptions(quantization_configurations=tuple([TEST_QC])),
             tpc_minor_version=None,
             tpc_patch_version=None,
@@ -222,7 +222,7 @@ def test_filter_layer_attached_to_multiple_opsets(self):
     # TODO: need to test as part of attach to fw tests
     # def test_opset_not_in_tp(self):
     #     default_qco = schema.QuantizationConfigOptions(quantization_configurations=tuple([TEST_QC]))
-    #     hm = schema.TargetPlatformModel(default_qco=default_qco,
+    #     hm = schema.TargetPlatformCapabilities(default_qco=default_qco,
     #                                     tpc_minor_version=None,
     #                                     tpc_patch_version=None,
     #                                     tpc_platform_type=None,
@@ -245,13 +245,13 @@ def test_pytorch_fusing_patterns(self):
         operator_set = [a, b, c]
         fusing_patterns = [schema.Fusing(operator_groups=(a, b, c)),
                            schema.Fusing(operator_groups=(a, c))]
-        hm = schema.TargetPlatformModel(default_qco=default_qco,
-                                        tpc_minor_version=None,
-                                        tpc_patch_version=None,
-                                        tpc_platform_type=None,
-                                        operator_set=tuple(operator_set),
-                                        fusing_patterns=tuple(fusing_patterns),
-                                        add_metadata=False)
+        hm = schema.TargetPlatformCapabilities(default_qco=default_qco,
+                                               tpc_minor_version=None,
+                                               tpc_patch_version=None,
+                                               tpc_platform_type=None,
+                                               operator_set=tuple(operator_set),
+                                               fusing_patterns=tuple(fusing_patterns),
+                                               add_metadata=False)
 
         hm_keras = tp.FrameworkQuantizationCapabilities(hm)
         with hm_keras:
diff --git a/tests/pytorch_tests/model_tests/base_pytorch_test.py b/tests/pytorch_tests/model_tests/base_pytorch_test.py
index a06d67a57..da861212e 100644
--- a/tests/pytorch_tests/model_tests/base_pytorch_test.py
+++ b/tests/pytorch_tests/model_tests/base_pytorch_test.py
@@ -21,7 +21,7 @@
 import torch
 import numpy as np
 
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
 from tests.common_tests.base_feature_test import BaseFeatureNetworkTest
 from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
 
@@ -136,7 +136,7 @@ def representative_data_gen_experimental():
                                            "mapping the test model name to a TPC object."
         for model_name in tpc_dict.keys():
             tpc = tpc_dict[model_name]
-            assert isinstance(tpc, TargetPlatformModel)
+            assert isinstance(tpc, TargetPlatformCapabilities)
 
             core_config = core_config_dict.get(model_name)
             assert core_config is not None, f"Model name {model_name} does not exists in the test's " \
diff --git a/tests/pytorch_tests/model_tests/feature_models/bn_attributes_quantization_test.py b/tests/pytorch_tests/model_tests/feature_models/bn_attributes_quantization_test.py
index ca1c09f3e..3a04b53a0 100644
--- a/tests/pytorch_tests/model_tests/feature_models/bn_attributes_quantization_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/bn_attributes_quantization_test.py
@@ -82,7 +82,7 @@ def _generate_bn_quantized_tpm(quantize_linear):
     linear_configuration_options = schema.QuantizationConfigOptions(quantization_configurations=tuple([linear_op_qc]))
     bn_configuration_options = schema.QuantizationConfigOptions(quantization_configurations=tuple([bn_op_qc]))
 
-    generated_tpm = schema.TargetPlatformModel(
+    generated_tpm = schema.TargetPlatformCapabilities(
         default_qco=default_configuration_options,
         tpc_minor_version=None,
         tpc_patch_version=None,
diff --git a/tests/pytorch_tests/model_tests/feature_models/const_quantization_test.py b/tests/pytorch_tests/model_tests/feature_models/const_quantization_test.py
index cda176144..7d7e44684 100644
--- a/tests/pytorch_tests/model_tests/feature_models/const_quantization_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/const_quantization_test.py
@@ -254,7 +254,7 @@ def get_tpc(self):
                                                    weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO))
         const_configuration_options = schema.QuantizationConfigOptions(quantization_configurations=tuple([const_config]))
 
-        tp_model = schema.TargetPlatformModel(
+        tp_model = schema.TargetPlatformCapabilities(
             default_qco=default_configuration_options,
             tpc_minor_version=None,
             tpc_patch_version=None,
diff --git a/tests/pytorch_tests/model_tests/feature_models/dynamic_size_inputs_test.py b/tests/pytorch_tests/model_tests/feature_models/dynamic_size_inputs_test.py
index d1c9d3046..37e54a88a 100644
--- a/tests/pytorch_tests/model_tests/feature_models/dynamic_size_inputs_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/dynamic_size_inputs_test.py
@@ -18,7 +18,7 @@
 import torch
 
 import model_compression_toolkit as mct
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 
 """
@@ -110,7 +110,7 @@ def representative_data_gen_experimental():
         for model_name in tpc_dict.keys():
             tpc = tpc_dict[model_name]
 
-            assert isinstance(tpc, TargetPlatformModel)
+            assert isinstance(tpc, TargetPlatformCapabilities)
 
             core_config = core_config_dict.get(model_name)
             assert core_config is not None, f"Model name {model_name} does not exists in the test's " \
diff --git a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_activation_test.py b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_activation_test.py
index e9d9e5718..0b9094557 100644
--- a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_activation_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_activation_test.py
@@ -25,7 +25,7 @@
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, PYTORCH_KERNEL, \
     BIAS
 from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities, OperationsSetToLayers
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, OperatorsSet, \
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, OperatorsSet, \
     QuantizationConfigOptions
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_op_quantization_configs
@@ -324,7 +324,7 @@ def get_tpc(self):
             base_config=cfg,
         )
 
-        tp_model = TargetPlatformModel(
+        tp_model = TargetPlatformCapabilities(
             default_qco=QuantizationConfigOptions(quantization_configurations=tuple([cfg]), base_config=cfg),
             tpc_minor_version=None,
             tpc_patch_version=None,
diff --git a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_weights_test.py b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_weights_test.py
index 2c6202755..38c343bd0 100644
--- a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_weights_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_weights_test.py
@@ -25,7 +25,7 @@
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, PYTORCH_KERNEL, BIAS_ATTR
 from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities, \
     OperationsSetToLayers
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel, OperatorsSet, \
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, OperatorsSet, \
     QuantizationConfigOptions
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_tp_model, \
@@ -162,7 +162,7 @@ def get_tpc(self):
             base_config=two_bit_cfg,
         )
 
-        tp_model = schema.TargetPlatformModel(
+        tp_model = schema.TargetPlatformCapabilities(
             default_qco=weight_fixed_cfg,
             tpc_minor_version=None,
             tpc_patch_version=None,
@@ -319,7 +319,7 @@ def get_tpc(self):
             base_config=cfg,
         )
 
-        tp_model = TargetPlatformModel(
+        tp_model = TargetPlatformCapabilities(
             default_qco=QuantizationConfigOptions(quantization_configurations=tuple([cfg]), base_config=cfg),
             tpc_minor_version=None,
             tpc_patch_version=None,
diff --git a/tests/pytorch_tests/model_tests/feature_models/multi_head_attention_test.py b/tests/pytorch_tests/model_tests/feature_models/multi_head_attention_test.py
index 5bb7582e9..17c55037f 100644
--- a/tests/pytorch_tests/model_tests/feature_models/multi_head_attention_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/multi_head_attention_test.py
@@ -19,7 +19,7 @@
 import torch.nn as nn
 
 import model_compression_toolkit as mct
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformModel
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
 from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
@@ -105,7 +105,7 @@ def representative_data_gen():
         assert isinstance(tpc_dict, dict), "Pytorch tests get_tpc should return a dictionary " \
                                            "mapping the test model name to a TPC object."
         for model_name, tpc in tpc_dict.items():
-            assert isinstance(tpc, TargetPlatformModel)
+            assert isinstance(tpc, TargetPlatformCapabilities)
             assert model_name in core_configs_dict
             core_config = core_configs_dict[model_name]
 
diff --git a/tutorials/notebooks/mct_features_notebooks/keras/example_keras_pruning_mnist.ipynb b/tutorials/notebooks/mct_features_notebooks/keras/example_keras_pruning_mnist.ipynb
index e53a55365..82f69c275 100644
--- a/tutorials/notebooks/mct_features_notebooks/keras/example_keras_pruning_mnist.ipynb
+++ b/tutorials/notebooks/mct_features_notebooks/keras/example_keras_pruning_mnist.ipynb
@@ -245,7 +245,7 @@
     "\n",
     "    # Create the quantization configuration options and model\n",
     "    default_configuration_options = tp.QuantizationConfigOptions([default_config])\n",
-    "    tp_model = tp.TargetPlatformModel(default_configuration_options,\n",
+    "    tp_model = tp.TargetPlatformCapabilities(default_configuration_options,\n",
     "                                      tpc_minor_version=1,\n",
     "                                      tpc_patch_version=0,\n",
     "                                      tpc_platform_type=\"custom_pruning_notebook_tpc\")\n",
diff --git a/tutorials/notebooks/mct_features_notebooks/keras/example_keras_qat.ipynb b/tutorials/notebooks/mct_features_notebooks/keras/example_keras_qat.ipynb
index 81f622371..f504fcb4b 100644
--- a/tutorials/notebooks/mct_features_notebooks/keras/example_keras_qat.ipynb
+++ b/tutorials/notebooks/mct_features_notebooks/keras/example_keras_qat.ipynb
@@ -251,10 +251,10 @@
     "        simd_size=None,\n",
     "        signedness=Signedness.AUTO)\n",
     "\n",
-    "    # Set default QuantizationConfigOptions in new TargetPlatformModel to be used when no other\n",
+    "    # Set default QuantizationConfigOptions in new TargetPlatformCapabilities to be used when no other\n",
     "    # QuantizationConfigOptions is set for an OperatorsSet.\n",
     "    default_configuration_options = tp.QuantizationConfigOptions([default_config])\n",
-    "    tp_model = tp.TargetPlatformModel(default_configuration_options,\n",
+    "    tp_model = tp.TargetPlatformCapabilities(default_configuration_options,\n",
     "                                      tpc_minor_version=1,\n",
     "                                      tpc_patch_version=0,\n",
     "                                      tpc_platform_type=\"custom_qat_notebook_tpc\")\n",

From 8688708fc9f52923161469de9c9d8c5a82bf2f44 Mon Sep 17 00:00:00 2001
From: liord <lior.dikstein@altair-semi.com>
Date: Sun, 12 Jan 2025 11:37:00 +0200
Subject: [PATCH 05/18] Rename
 TargetPlatformCapabilities-->FrameworkQuantizationCapabilities, tpc->fqc

---
 .../core/common/fusion/layer_fusing.py        |  6 ++--
 .../core/common/graph/base_graph.py           | 28 +++++++++----------
 .../core/common/graph/base_node.py            | 26 ++++++++---------
 .../mixed_precision_candidates_filter.py      | 10 +++----
 .../resource_utilization_data.py              | 12 ++++----
 .../common/pruning/greedy_mask_calculator.py  | 10 +++----
 .../quantization/node_quantization_config.py  |  4 +--
 .../quantization/quantization_config.py       |  2 +-
 .../set_node_quantization_config.py           | 26 ++++++++---------
 .../shift_negative_activation.py              |  8 +++---
 .../core/graph_prep_runner.py                 | 20 ++++++-------
 .../back2framework/pytorch_model_builder.py   |  2 +-
 model_compression_toolkit/core/runner.py      | 10 +++----
 .../gptq/keras/quantization_facade.py         | 10 +++----
 .../gptq/pytorch/quantization_facade.py       | 10 +++----
 model_compression_toolkit/metadata.py         | 18 ++++++------
 .../pruning/pytorch/pruning_facade.py         |  8 +++---
 .../ptq/keras/quantization_facade.py          | 10 +++----
 .../ptq/pytorch/quantization_facade.py        | 10 +++----
 .../qat/keras/quantization_facade.py          |  4 +--
 .../qat/pytorch/quantization_facade.py        |  6 ++--
 .../xquant/common/model_folding_utils.py      | 10 +++----
 .../xquant/keras/keras_report_utils.py        |  4 +--
 .../xquant/pytorch/pytorch_report_utils.py    |  4 +--
 .../helpers/generate_test_tp_model.py         | 12 ++++----
 .../helpers/prep_graph_for_func_test.py       |  8 +++---
 tests/common_tests/test_tp_model.py           |  2 +-
 .../test_networks_runner_float.py             |  2 +-
 .../requires_mixed_precision_test.py          |  4 +--
 .../network_editor/edit_qc_test.py            |  4 +--
 .../second_moment_correction_test.py          |  7 +++--
 ...vation_weights_composition_substitution.py |  6 ++--
 .../test_cfg_candidates_filter.py             |  6 ++--
 ...test_sensitivity_metric_interest_points.py |  4 +--
 .../test_unsupported_custom_layer.py          |  2 +-
 .../test_lp_search_bitwidth.py                |  4 +--
 .../test_tensorboard_writer.py                |  4 +--
 tests/keras_tests/tpc_keras.py                |  2 +-
 38 files changed, 163 insertions(+), 162 deletions(-)

diff --git a/model_compression_toolkit/core/common/fusion/layer_fusing.py b/model_compression_toolkit/core/common/fusion/layer_fusing.py
index 0ae75941b..98303b207 100644
--- a/model_compression_toolkit/core/common/fusion/layer_fusing.py
+++ b/model_compression_toolkit/core/common/fusion/layer_fusing.py
@@ -77,18 +77,18 @@ def disable_nodes_activation_quantization(nodes: List[BaseNode]):
             qc.activation_quantization_cfg.enable_activation_quantization = False
 
 
-def fusion(graph: Graph, tpc: FrameworkQuantizationCapabilities) -> Graph:
+def fusion(graph: Graph, fqc: FrameworkQuantizationCapabilities) -> Graph:
     """
     Fusing defines a list of operators that should be combined and treated as a single operator,
     hence no quantization is applied between them when they appear in the graph.
     This function search and disable quantization for such patterns.
     Args:
         graph: Graph we apply the fusion on.
-        tpc: FrameworkQuantizationCapabilities object that describes the desired inference target platform (includes fusing patterns MCT should handle).
+        fqc: FrameworkQuantizationCapabilities object that describes the desired inference target platform (includes fusing patterns MCT should handle).
     Returns:
         Graph after applying fusion activation marking.
     """
-    fusing_patterns = tpc.get_fusing_patterns()
+    fusing_patterns = fqc.get_fusing_patterns()
     if len(fusing_patterns) == 0:
         return graph
 
diff --git a/model_compression_toolkit/core/common/graph/base_graph.py b/model_compression_toolkit/core/common/graph/base_graph.py
index b6abe7218..2b64787b2 100644
--- a/model_compression_toolkit/core/common/graph/base_graph.py
+++ b/model_compression_toolkit/core/common/graph/base_graph.py
@@ -86,29 +86,29 @@ def set_fw_info(self,
 
         self.fw_info = fw_info
 
-    def set_tpc(self,
-                tpc: FrameworkQuantizationCapabilities):
+    def set_fqc(self,
+                fqc: FrameworkQuantizationCapabilities):
         """
-        Set the graph's TPC.
+        Set the graph's FQC.
         Args:
-            tpc: FrameworkQuantizationCapabilities object.
+            fqc: FrameworkQuantizationCapabilities object.
         """
-        # validate graph nodes are either from the framework or a custom layer defined in the TPC
-        # Validate graph nodes are either built-in layers from the framework or custom layers defined in the TPC
-        tpc_layers = tpc.op_sets_to_layers.get_layers()
-        tpc_filtered_layers = [layer for layer in tpc_layers if isinstance(layer, LayerFilterParams)]
+        # validate graph nodes are either from the framework or a custom layer defined in the FQC
+        # Validate graph nodes are either built-in layers from the framework or custom layers defined in the FQC
+        fqc_layers = fqc.op_sets_to_layers.get_layers()
+        fqc_filtered_layers = [layer for layer in fqc_layers if isinstance(layer, LayerFilterParams)]
         for n in self.nodes:
-            is_node_in_tpc = any([n.is_match_type(_type) for _type in tpc_layers]) or \
-                             any([n.is_match_filter_params(filtered_layer) for filtered_layer in tpc_filtered_layers])
+            is_node_in_fqc = any([n.is_match_type(_type) for _type in fqc_layers]) or \
+                             any([n.is_match_filter_params(filtered_layer) for filtered_layer in fqc_filtered_layers])
             if n.is_custom:
-                if not is_node_in_tpc:
+                if not is_node_in_fqc:
                     Logger.critical(f'MCT does not support optimizing Keras custom layers. Found a layer of type {n.type}. '
-                                    ' Please add the custom layer to Target Platform Capabilities (TPC), or file a feature '
+                                    ' Please add the custom layer to Framework Quantization Capabilities (FQC), or file a feature '
                                     'request or an issue if you believe this should be supported.')  # pragma: no cover
-                if any([qc.default_weight_attr_config.enable_weights_quantization for qc in n.get_qco(tpc).quantization_configurations]):
+                if any([qc.default_weight_attr_config.enable_weights_quantization for qc in n.get_qco(fqc).quantization_configurations]):
                     Logger.critical(f'Layer identified: {n.type}. MCT does not support weight quantization for Keras custom layers.')  # pragma: no cover
 
-        self.tpc = tpc
+        self.fqc = fqc
 
     def get_topo_sorted_nodes(self):
         """
diff --git a/model_compression_toolkit/core/common/graph/base_node.py b/model_compression_toolkit/core/common/graph/base_node.py
index 916f2f68e..5b6b1e0e8 100644
--- a/model_compression_toolkit/core/common/graph/base_node.py
+++ b/model_compression_toolkit/core/common/graph/base_node.py
@@ -536,34 +536,34 @@ def get_all_weights_attr_candidates(self, attr: str) -> List[WeightsAttrQuantiza
         # the inner method would log an exception.
         return [c.weights_quantization_cfg.get_attr_config(attr) for c in self.candidates_quantization_cfg]
 
-    def get_qco(self, tpc: FrameworkQuantizationCapabilities) -> QuantizationConfigOptions:
+    def get_qco(self, fqc: FrameworkQuantizationCapabilities) -> QuantizationConfigOptions:
         """
         Get the QuantizationConfigOptions of the node according
         to the mappings from layers/LayerFilterParams to the OperatorsSet in the TargetPlatformCapabilities.
 
         Args:
-            tpc: TPC to extract the QuantizationConfigOptions for the node.
+            fqc: FQC to extract the QuantizationConfigOptions for the node.
 
         Returns:
             QuantizationConfigOptions of the node.
         """
 
-        if tpc is None:
-            Logger.critical(f'Can not retrieve QC options for None TPC')  # pragma: no cover
+        if fqc is None:
+            Logger.critical(f'Can not retrieve QC options for None FQC')  # pragma: no cover
 
-        for fl, qco in tpc.filterlayer2qco.items():
+        for fl, qco in fqc.filterlayer2qco.items():
             if self.is_match_filter_params(fl):
                 return qco
         # Extract qco with is_match_type to overcome mismatch of function types in TF 2.15
-        matching_qcos = [_qco for _type, _qco in tpc.layer2qco.items() if self.is_match_type(_type)]
+        matching_qcos = [_qco for _type, _qco in fqc.layer2qco.items() if self.is_match_type(_type)]
         if matching_qcos:
             if all([_qco == matching_qcos[0] for _qco in matching_qcos]):
                 return matching_qcos[0]
             else:
                 Logger.critical(f"Found duplicate qco types for node '{self.name}' of type '{self.type}'!")  # pragma: no cover
-        return tpc.tp_model.default_qco
+        return fqc.tp_model.default_qco
 
-    def filter_node_qco_by_graph(self, tpc: FrameworkQuantizationCapabilities,
+    def filter_node_qco_by_graph(self, fqc: FrameworkQuantizationCapabilities,
                                  next_nodes: List, node_qc_options: QuantizationConfigOptions
                                  ) -> Tuple[OpQuantizationConfig, List[OpQuantizationConfig]]:
         """
@@ -573,7 +573,7 @@ def filter_node_qco_by_graph(self, tpc: FrameworkQuantizationCapabilities,
         filters out quantization config that don't comply to these attributes.
 
         Args:
-            tpc: TPC to extract the QuantizationConfigOptions for the next nodes.
+            fqc: FQC to extract the QuantizationConfigOptions for the next nodes.
             next_nodes: Output nodes of current node.
             node_qc_options: Node's QuantizationConfigOptions.
 
@@ -584,7 +584,7 @@ def filter_node_qco_by_graph(self, tpc: FrameworkQuantizationCapabilities,
         _base_config = node_qc_options.base_config
         _node_qc_options = node_qc_options.quantization_configurations
         if len(next_nodes):
-            next_nodes_qc_options = [_node.get_qco(tpc) for _node in next_nodes]
+            next_nodes_qc_options = [_node.get_qco(fqc) for _node in next_nodes]
             next_nodes_supported_input_bitwidth = min([max_input_activation_n_bits(op_cfg)
                                                        for qc_opts in next_nodes_qc_options
                                                        for op_cfg in qc_opts.quantization_configurations])
@@ -593,7 +593,7 @@ def filter_node_qco_by_graph(self, tpc: FrameworkQuantizationCapabilities,
             _node_qc_options = [_option for _option in _node_qc_options
                                 if _option.activation_n_bits <= next_nodes_supported_input_bitwidth]
             if len(_node_qc_options) == 0:
-                Logger.critical(f"Graph doesn't match TPC bit configurations: {self} -> {next_nodes}.")  # pragma: no cover
+                Logger.critical(f"Graph doesn't match FQC bit configurations: {self} -> {next_nodes}.")  # pragma: no cover
 
             # Verify base config match
             if any([node_qc_options.base_config.activation_n_bits > max_input_activation_n_bits(qc_opt.base_config)
@@ -603,9 +603,9 @@ def filter_node_qco_by_graph(self, tpc: FrameworkQuantizationCapabilities,
                 if len(_node_qc_options) > 0:
                     output_act_bitwidth = {qco.activation_n_bits: i for i, qco in enumerate(_node_qc_options)}
                     _base_config = _node_qc_options[output_act_bitwidth[max(output_act_bitwidth)]]
-                    Logger.warning(f"Node {self} base quantization config changed to match Graph and TPC configuration.\nCause: {self} -> {next_nodes}.")
+                    Logger.warning(f"Node {self} base quantization config changed to match Graph and FQC configuration.\nCause: {self} -> {next_nodes}.")
                 else:
-                    Logger.critical(f"Graph doesn't match TPC bit configurations: {self} -> {next_nodes}.")  # pragma: no cover
+                    Logger.critical(f"Graph doesn't match FQC bit configurations: {self} -> {next_nodes}.")  # pragma: no cover
 
         return _base_config, _node_qc_options
 
diff --git a/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py b/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py
index 146724e3c..f0308408b 100644
--- a/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py
+++ b/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py
@@ -23,12 +23,12 @@
 def filter_candidates_for_mixed_precision(graph: Graph,
                                           target_resource_utilization: ResourceUtilization,
                                           fw_info: FrameworkInfo,
-                                          tpc: FrameworkQuantizationCapabilities):
+                                          fqc: FrameworkQuantizationCapabilities):
     """
     Filters out candidates in case of mixed precision search for only weights or activation compression.
     For instance, if running only weights compression - filters out candidates of activation configurable nodes
     such that only a single candidate would remain, with the bitwidth equal to the one defined in the matching layer's
-    base config in the TPC.
+    base config in the FQC.
 
     Note: This function modifies the graph inplace!
 
@@ -36,7 +36,7 @@ def filter_candidates_for_mixed_precision(graph: Graph,
         graph: A graph representation of the model to be quantized.
         target_resource_utilization: The resource utilization of the target device.
         fw_info: fw_info: Information needed for quantization about the specific framework.
-        tpc: FrameworkQuantizationCapabilities object that describes the desired inference target platform.
+        fqc: FrameworkQuantizationCapabilities object that describes the desired inference target platform.
 
     """
 
@@ -50,7 +50,7 @@ def filter_candidates_for_mixed_precision(graph: Graph,
         weights_conf = graph.get_weights_configurable_nodes(fw_info)
         activation_configurable_nodes = [n for n in graph.get_activation_configurable_nodes() if n not in weights_conf]
         for n in activation_configurable_nodes:
-            base_cfg_nbits = n.get_qco(tpc).base_config.activation_n_bits
+            base_cfg_nbits = n.get_qco(fqc).base_config.activation_n_bits
             filtered_conf = [c for c in n.candidates_quantization_cfg if
                              c.activation_quantization_cfg.enable_activation_quantization and
                              c.activation_quantization_cfg.activation_n_bits == base_cfg_nbits]
@@ -67,7 +67,7 @@ def filter_candidates_for_mixed_precision(graph: Graph,
         weight_configurable_nodes = [n for n in graph.get_weights_configurable_nodes(fw_info) if n not in activation_conf]
         for n in weight_configurable_nodes:
             kernel_attr = graph.fw_info.get_kernel_op_attributes(n.type)[0]
-            base_cfg_nbits = n.get_qco(tpc).base_config.attr_weights_configs_mapping[kernel_attr].weights_n_bits
+            base_cfg_nbits = n.get_qco(fqc).base_config.attr_weights_configs_mapping[kernel_attr].weights_n_bits
             filtered_conf = [c for c in n.candidates_quantization_cfg if
                              c.weights_quantization_cfg.get_attr_config(kernel_attr).enable_weights_quantization and
                              c.weights_quantization_cfg.get_attr_config(kernel_attr).weights_n_bits == base_cfg_nbits]
diff --git a/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py b/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py
index 760a0a2c3..b37067bc9 100644
--- a/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py
+++ b/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py
@@ -33,7 +33,7 @@
 def compute_resource_utilization_data(in_model: Any,
                                       representative_data_gen: Callable,
                                       core_config: CoreConfig,
-                                      tpc: FrameworkQuantizationCapabilities,
+                                      fqc: FrameworkQuantizationCapabilities,
                                       fw_info: FrameworkInfo,
                                       fw_impl: FrameworkImplementation,
                                       transformed_graph: Graph = None,
@@ -47,7 +47,7 @@ def compute_resource_utilization_data(in_model: Any,
         in_model:  Model to build graph from (the model that intended to be quantized).
         representative_data_gen: Dataset used for calibration.
         core_config: CoreConfig containing parameters of how the model should be quantized.
-        tpc: FrameworkQuantizationCapabilities object that models the inference target platform and
+        fqc: FrameworkQuantizationCapabilities object that models the inference target platform and
                                               the attached framework operator's information.
         fw_info: Information needed for quantization about the specific framework.
         fw_impl: FrameworkImplementation object with a specific framework methods implementation.
@@ -70,7 +70,7 @@ def compute_resource_utilization_data(in_model: Any,
                                                      core_config.quantization_config,
                                                      fw_info,
                                                      fw_impl,
-                                                     tpc,
+                                                     fqc,
                                                      bit_width_config=core_config.bit_width_config,
                                                      mixed_precision_enable=mixed_precision_enable)
 
@@ -246,7 +246,7 @@ def requires_mixed_precision(in_model: Any,
                              target_resource_utilization: ResourceUtilization,
                              representative_data_gen: Callable,
                              core_config: CoreConfig,
-                             tpc: FrameworkQuantizationCapabilities,
+                             fqc: FrameworkQuantizationCapabilities,
                              fw_info: FrameworkInfo,
                              fw_impl: FrameworkImplementation) -> bool:
     """
@@ -261,7 +261,7 @@ def requires_mixed_precision(in_model: Any,
         target_resource_utilization: The resource utilization of the target device.
         representative_data_gen: A function that generates representative data for the model.
         core_config: CoreConfig containing parameters of how the model should be quantized.
-        tpc: FrameworkQuantizationCapabilities object that models the inference target platform and
+        fqc: FrameworkQuantizationCapabilities object that models the inference target platform and
                                               the attached framework operator's information.
         fw_info: Information needed for quantization about the specific framework.
         fw_impl: FrameworkImplementation object with a specific framework methods implementation.
@@ -276,7 +276,7 @@ def requires_mixed_precision(in_model: Any,
                                                  core_config.quantization_config,
                                                  fw_info,
                                                  fw_impl,
-                                                 tpc,
+                                                 fqc,
                                                  bit_width_config=core_config.bit_width_config,
                                                  mixed_precision_enable=False)
     # Compute max weights memory in bytes
diff --git a/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py b/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py
index e6367c4c6..7ecdedd78 100644
--- a/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py
+++ b/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py
@@ -42,7 +42,7 @@ def __init__(self,
                  target_resource_utilization: ResourceUtilization,
                  graph: Graph,
                  fw_impl: PruningFrameworkImplementation,
-                 tpc: FrameworkQuantizationCapabilities,
+                 fqc: FrameworkQuantizationCapabilities,
                  simd_groups_indices: Dict[BaseNode, List[List[int]]]):
         """
         Args:
@@ -52,7 +52,7 @@ def __init__(self,
             target_resource_utilization (ResourceUtilization): The target resource utilization to achieve.
             graph (Graph): The computational graph of the model.
             fw_impl (PruningFrameworkImplementation): Framework-specific implementation details.
-            tpc (FrameworkQuantizationCapabilities): Platform-specific constraints and capabilities.
+            fqc (FrameworkQuantizationCapabilities): Platform-specific constraints and capabilities.
             simd_groups_indices (Dict[BaseNode, List[List[int]]]): Indices of SIMD groups in each node.
         """
         self.prunable_nodes = prunable_nodes
@@ -60,7 +60,7 @@ def __init__(self,
         self.target_resource_utilization = target_resource_utilization
         self.graph = graph
         self.fw_impl = fw_impl
-        self.tpc = tpc
+        self.fqc = fqc
 
         self.simd_groups_indices = simd_groups_indices
         self.simd_groups_scores = simd_groups_scores
@@ -90,7 +90,7 @@ def compute_mask(self):
         """
         # Iteratively unprune the graph while monitoring the memory footprint.
         current_memory = self.memory_calculator.get_pruned_graph_memory(masks=self.oc_pruning_mask.get_mask(),
-                                                                        include_padded_channels=self.tpc.is_simd_padding)
+                                                                        include_padded_channels=self.fqc.is_simd_padding)
         if current_memory > self.target_resource_utilization.weights_memory:
             Logger.critical(f"Insufficient memory for the target resource utilization: current memory {current_memory}, "
                             f"target memory {self.target_resource_utilization.weights_memory}.")
@@ -105,7 +105,7 @@ def compute_mask(self):
                                                                group_index=group_to_remain_idx,
                                                                mask_indicator=MaskIndicator.REMAINED)
             current_memory = self.memory_calculator.get_pruned_graph_memory(masks=self.oc_pruning_mask.get_mask(),
-                                                                            include_padded_channels=self.tpc.is_simd_padding)
+                                                                            include_padded_channels=self.fqc.is_simd_padding)
 
         # If the target memory is exceeded, revert the last addition.
         if current_memory > self.target_resource_utilization.weights_memory:
diff --git a/model_compression_toolkit/core/common/quantization/node_quantization_config.py b/model_compression_toolkit/core/common/quantization/node_quantization_config.py
index cfc36698e..cad9c510a 100644
--- a/model_compression_toolkit/core/common/quantization/node_quantization_config.py
+++ b/model_compression_toolkit/core/common/quantization/node_quantization_config.py
@@ -401,9 +401,9 @@ def __init__(self, qc: QuantizationConfig,
                 # therefore, we need to look for the attribute in the op_cfg that is contained in the node attribute's name.
                 attrs_included_in_name = {k: v for k, v in op_cfg.attr_weights_configs_mapping.items() if k in attr}
                 if len(attrs_included_in_name) > 1:  # pragma: no cover
-                    Logger.critical(f"Found multiple attribute in TPC OpConfig that are contained "
+                    Logger.critical(f"Found multiple attribute in FQC OpConfig that are contained "
                                     f"in the attribute name '{attr}'."
-                                    f"Please fix the TPC attribute names mapping such that each operator's attribute would "
+                                    f"Please fix the FQC attribute names mapping such that each operator's attribute would "
                                     f"have a unique matching name.")
                 if len(attrs_included_in_name) == 0:
                     attr_cfg = op_cfg.default_weight_attr_config
diff --git a/model_compression_toolkit/core/common/quantization/quantization_config.py b/model_compression_toolkit/core/common/quantization/quantization_config.py
index f43831723..5c8df6b33 100644
--- a/model_compression_toolkit/core/common/quantization/quantization_config.py
+++ b/model_compression_toolkit/core/common/quantization/quantization_config.py
@@ -25,7 +25,7 @@
 class CustomOpsetLayers(NamedTuple):
     """
     This struct defines a set of operators from a specific framework, which will be used to configure a custom operator
-    set in the TPC.
+    set in the FQC.
 
     Args:
         operators: a list of framework operators to map to a certain custom opset name.
diff --git a/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py b/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py
index 2b9ffe5b6..d83e9e96f 100644
--- a/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py
+++ b/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py
@@ -71,14 +71,14 @@ def set_quantization_configuration_to_graph(graph: Graph,
                                          graph=graph,
                                          quant_config=quant_config,
                                          fw_info=graph.fw_info,
-                                         tpc=graph.tpc,
+                                         fqc=graph.fqc,
                                          mixed_precision_enable=mixed_precision_enable,
                                          manual_bit_width_override=nodes_to_manipulate_bit_widths.get(n))
     return graph
 
 
 def filter_node_qco_by_graph(node: BaseNode,
-                             tpc: FrameworkQuantizationCapabilities,
+                             fqc: FrameworkQuantizationCapabilities,
                              graph: Graph,
                              node_qc_options: QuantizationConfigOptions
                              ) -> Tuple[OpQuantizationConfig, List[OpQuantizationConfig]]:
@@ -90,7 +90,7 @@ def filter_node_qco_by_graph(node: BaseNode,
 
     Args:
         node: Node for filtering.
-        tpc: TPC to extract the QuantizationConfigOptions for the next nodes.
+        fqc: FQC to extract the QuantizationConfigOptions for the next nodes.
         graph: Graph object.
         node_qc_options: Node's QuantizationConfigOptions.
 
@@ -108,7 +108,7 @@ def filter_node_qco_by_graph(node: BaseNode,
     next_nodes = []
     while len(_next_nodes):
         n = _next_nodes.pop(0)
-        qco = n.get_qco(tpc)
+        qco = n.get_qco(fqc)
         qp = [qc.quantization_preserving for qc in qco.quantization_configurations]
         if not all(qp) and any(qp):
             Logger.error(f'Attribute "quantization_preserving" should be the same for all QuantizaionConfigOptions in {n}.')
@@ -117,7 +117,7 @@ def filter_node_qco_by_graph(node: BaseNode,
         next_nodes.append(n)
 
     if len(next_nodes):
-        next_nodes_qc_options = [_node.get_qco(tpc) for _node in next_nodes]
+        next_nodes_qc_options = [_node.get_qco(fqc) for _node in next_nodes]
         next_nodes_supported_input_bitwidth = min([max_input_activation_n_bits(op_cfg)
                                                    for qc_opts in next_nodes_qc_options
                                                    for op_cfg in qc_opts.quantization_configurations])
@@ -126,7 +126,7 @@ def filter_node_qco_by_graph(node: BaseNode,
         _node_qc_options = [_option for _option in _node_qc_options
                             if _option.activation_n_bits <= next_nodes_supported_input_bitwidth]
         if len(_node_qc_options) == 0:
-            Logger.critical(f"Graph doesn't match TPC bit configurations: {node} -> {next_nodes}.")
+            Logger.critical(f"Graph doesn't match FQC bit configurations: {node} -> {next_nodes}.")
 
         # Verify base config match
         if any([node_qc_options.base_config.activation_n_bits > max_input_activation_n_bits(qc_opt.base_config)
@@ -136,9 +136,9 @@ def filter_node_qco_by_graph(node: BaseNode,
             if len(_node_qc_options) > 0:
                 output_act_bitwidth = {qco.activation_n_bits: i for i, qco in enumerate(_node_qc_options)}
                 _base_config = _node_qc_options[output_act_bitwidth[max(output_act_bitwidth)]]
-                Logger.warning(f"Node {node} base quantization config changed to match Graph and TPC configuration.\nCause: {node} -> {next_nodes}.")
+                Logger.warning(f"Node {node} base quantization config changed to match Graph and FQC configuration.\nCause: {node} -> {next_nodes}.")
             else:
-                Logger.critical(f"Graph doesn't match TPC bit configurations: {node} -> {next_nodes}.")  # pragma: no cover
+                Logger.critical(f"Graph doesn't match FQC bit configurations: {node} -> {next_nodes}.")  # pragma: no cover
 
     return _base_config, _node_qc_options
 
@@ -147,7 +147,7 @@ def set_quantization_configs_to_node(node: BaseNode,
                                      graph: Graph,
                                      quant_config: QuantizationConfig,
                                      fw_info: FrameworkInfo,
-                                     tpc: FrameworkQuantizationCapabilities,
+                                     fqc: FrameworkQuantizationCapabilities,
                                      mixed_precision_enable: bool = False,
                                      manual_bit_width_override: Optional[int] = None):
     """
@@ -158,12 +158,12 @@ def set_quantization_configs_to_node(node: BaseNode,
         graph (Graph): Model's internal representation graph.
         quant_config (QuantizationConfig): Quantization configuration to generate the node's configurations from.
         fw_info (FrameworkInfo): Information needed for quantization about the specific framework.
-        tpc (FrameworkQuantizationCapabilities): FrameworkQuantizationCapabilities to get default OpQuantizationConfig.
+        fqc (FrameworkQuantizationCapabilities): FrameworkQuantizationCapabilities to get default OpQuantizationConfig.
         mixed_precision_enable (bool): Whether mixed precision is enabled. Defaults to False.
         manual_bit_width_override (Optional[int]): Specifies a custom bit-width to override the node's activation bit-width. Defaults to None.
     """
-    node_qc_options = node.get_qco(tpc)
-    base_config, node_qc_options_list = filter_node_qco_by_graph(node, tpc, graph, node_qc_options)
+    node_qc_options = node.get_qco(fqc)
+    base_config, node_qc_options_list = filter_node_qco_by_graph(node, fqc, graph, node_qc_options)
 
     # If a manual_bit_width_override is given, filter node_qc_options_list to retain only the options with activation bits equal to manual_bit_width_override,
     # and update base_config accordingly.
@@ -257,7 +257,7 @@ def _create_node_single_candidate_qc(qc: QuantizationConfig,
     attrs_with_enabled_quantization = [attr for attr, cfg in op_cfg.attr_weights_configs_mapping.items()
                                        if cfg.enable_weights_quantization]
     if len(attrs_with_enabled_quantization) > 1:
-        Logger.warning(f"Multiple weights attributes quantization is enabled via the provided TPC."
+        Logger.warning(f"Multiple weights attributes quantization is enabled via the provided FQC."
                        f"Quantizing any attribute other than the kernel is experimental "
                        f"and may be subject to unstable behavior."
                        f"Attributes with enabled weights quantization: {attrs_with_enabled_quantization}.")
diff --git a/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py b/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py
index a04906b30..1db9fce20 100644
--- a/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py
+++ b/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py
@@ -359,7 +359,7 @@ def shift_negative_function(graph: Graph,
                                          node=pad_node,
                                          graph=graph,
                                          quant_config=core_config.quantization_config,
-                                         tpc=graph.tpc,
+                                         fqc=graph.fqc,
                                          mixed_precision_enable=core_config.is_mixed_precision_enabled)
 
         for candidate_qc in pad_node.candidates_quantization_cfg:
@@ -376,7 +376,7 @@ def shift_negative_function(graph: Graph,
                                      node=add_node,
                                      graph=graph,
                                      quant_config=core_config.quantization_config,
-                                     tpc=graph.tpc,
+                                     fqc=graph.fqc,
                                      mixed_precision_enable=core_config.is_mixed_precision_enabled)
 
     original_non_linear_activation_nbits = non_linear_node_cfg_candidate.activation_n_bits
@@ -392,7 +392,7 @@ def shift_negative_function(graph: Graph,
                     bypass_candidate_qc.activation_quantization_cfg.activation_quantization_params[SIGNED] = False
                     graph.shift_stats_collector(bypass_node, np.array(shift_value))
 
-    add_node_qco = add_node.get_qco(graph.tpc).quantization_configurations
+    add_node_qco = add_node.get_qco(graph.fqc).quantization_configurations
     for op_qc_idx, candidate_qc in enumerate(add_node.candidates_quantization_cfg):
         for attr in add_node.get_node_weights_attributes():
             candidate_qc.weights_quantization_cfg.get_attr_config(attr).enable_weights_quantization = False
@@ -533,7 +533,7 @@ def apply_shift_negative_correction(graph: Graph,
     nodes = list(graph.nodes())
     for n in nodes:
         # Skip substitution if QuantizationMethod is uniform.
-        node_qco = n.get_qco(graph.tpc)
+        node_qco = n.get_qco(graph.fqc)
         if any([op_qc.activation_quantization_method is QuantizationMethod.UNIFORM
                 for op_qc in node_qco.quantization_configurations]):
             continue
diff --git a/model_compression_toolkit/core/graph_prep_runner.py b/model_compression_toolkit/core/graph_prep_runner.py
index cdeb42901..3f9027330 100644
--- a/model_compression_toolkit/core/graph_prep_runner.py
+++ b/model_compression_toolkit/core/graph_prep_runner.py
@@ -38,7 +38,7 @@ def graph_preparation_runner(in_model: Any,
                              quantization_config: QuantizationConfig,
                              fw_info: FrameworkInfo,
                              fw_impl: FrameworkImplementation,
-                             tpc: FrameworkQuantizationCapabilities,
+                             fqc: FrameworkQuantizationCapabilities,
                              bit_width_config: BitWidthConfig = None,
                              tb_w: TensorboardWriter = None,
                              mixed_precision_enable: bool = False,
@@ -58,7 +58,7 @@ def graph_preparation_runner(in_model: Any,
         fw_info (FrameworkInfo): Information needed for quantization about the specific framework (e.g., kernel channels indices,
             groups of layers by how they should be quantized, etc.).
         fw_impl (FrameworkImplementation): FrameworkImplementation object with a specific framework methods implementation.
-        tpc (FrameworkQuantizationCapabilities): FrameworkQuantizationCapabilities object that models the inference target platform and
+        fqc (FrameworkQuantizationCapabilities): FrameworkQuantizationCapabilities object that models the inference target platform and
             the attached framework operator's information.
         bit_width_config (BitWidthConfig): Config for bit-width selection. Defaults to None.
         tb_w (TensorboardWriter): TensorboardWriter object for logging.
@@ -71,7 +71,7 @@ def graph_preparation_runner(in_model: Any,
 
     graph = read_model_to_graph(in_model,
                                 representative_data_gen,
-                                tpc,
+                                fqc,
                                 fw_info,
                                 fw_impl)
 
@@ -79,7 +79,7 @@ def graph_preparation_runner(in_model: Any,
         tb_w.add_graph(graph, 'initial_graph')
 
     transformed_graph = get_finalized_graph(graph,
-                                            tpc,
+                                            fqc,
                                             quantization_config,
                                             bit_width_config,
                                             fw_info,
@@ -92,7 +92,7 @@ def graph_preparation_runner(in_model: Any,
 
 
 def get_finalized_graph(initial_graph: Graph,
-                        tpc: FrameworkQuantizationCapabilities,
+                        fqc: FrameworkQuantizationCapabilities,
                         quant_config: QuantizationConfig = DEFAULTCONFIG,
                         bit_width_config: BitWidthConfig = None,
                         fw_info: FrameworkInfo = None,
@@ -106,7 +106,7 @@ def get_finalized_graph(initial_graph: Graph,
 
     Args:
         initial_graph (Graph): Graph to apply the changes to.
-        tpc (FrameworkQuantizationCapabilities): FrameworkQuantizationCapabilities object that describes the desired inference target platform (includes fusing patterns MCT should handle).
+        fqc (FrameworkQuantizationCapabilities): FrameworkQuantizationCapabilities object that describes the desired inference target platform (includes fusing patterns MCT should handle).
         quant_config (QuantizationConfig): QuantizationConfig containing parameters of how the model should be
             quantized.
         bit_width_config (BitWidthConfig): Config for bit-width selection. Defaults to None.
@@ -160,7 +160,7 @@ def get_finalized_graph(initial_graph: Graph,
     ######################################
     # Layer fusing
     ######################################
-    transformed_graph = fusion(transformed_graph, tpc)
+    transformed_graph = fusion(transformed_graph, fqc)
 
     ######################################
     # Channel equalization
@@ -185,7 +185,7 @@ def get_finalized_graph(initial_graph: Graph,
 
 def read_model_to_graph(in_model: Any,
                         representative_data_gen: Callable,
-                        tpc: FrameworkQuantizationCapabilities,
+                        fqc: FrameworkQuantizationCapabilities,
                         fw_info: FrameworkInfo = None,
                         fw_impl: FrameworkImplementation = None) -> Graph:
 
@@ -195,7 +195,7 @@ def read_model_to_graph(in_model: Any,
     Args:
         in_model: Model to optimize and prepare for quantization.
         representative_data_gen: Dataset used for calibration.
-        tpc: FrameworkQuantizationCapabilities object that models the inference target platform and
+        fqc: FrameworkQuantizationCapabilities object that models the inference target platform and
                       the attached framework operator's information.
         fw_info: Information needed for quantization about the specific framework (e.g.,
                 kernel channels indices, groups of layers by how they should be quantized, etc.)
@@ -207,5 +207,5 @@ def read_model_to_graph(in_model: Any,
     graph = fw_impl.model_reader(in_model,
                                  representative_data_gen)
     graph.set_fw_info(fw_info)
-    graph.set_tpc(tpc)
+    graph.set_fqc(fqc)
     return graph
diff --git a/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py b/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py
index 7b96b450e..2ba1986ef 100644
--- a/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py
+++ b/model_compression_toolkit/core/pytorch/back2framework/pytorch_model_builder.py
@@ -225,7 +225,7 @@ def __init__(self,
         """
         super(PytorchModel, self).__init__()
         self.graph = copy.deepcopy(graph)
-        delattr(self.graph, 'tpc')
+        delattr(self.graph, 'fqc')
 
         self.node_sort = list(topological_sort(self.graph))
         self.node_to_activation_quantization_holder = {}
diff --git a/model_compression_toolkit/core/runner.py b/model_compression_toolkit/core/runner.py
index 43cc09b55..1f3b080e6 100644
--- a/model_compression_toolkit/core/runner.py
+++ b/model_compression_toolkit/core/runner.py
@@ -57,7 +57,7 @@ def core_runner(in_model: Any,
                 core_config: CoreConfig,
                 fw_info: FrameworkInfo,
                 fw_impl: FrameworkImplementation,
-                tpc: FrameworkQuantizationCapabilities,
+                fqc: FrameworkQuantizationCapabilities,
                 target_resource_utilization: ResourceUtilization = None,
                 running_gptq: bool = False,
                 tb_w: TensorboardWriter = None):
@@ -77,7 +77,7 @@ def core_runner(in_model: Any,
         fw_info: Information needed for quantization about the specific framework (e.g., kernel channels indices,
         groups of layers by how they should be quantized, etc.).
         fw_impl: FrameworkImplementation object with a specific framework methods implementation.
-        tpc: FrameworkQuantizationCapabilities object that models the inference target platform and
+        fqc: FrameworkQuantizationCapabilities object that models the inference target platform and
                                               the attached framework operator's information.
         target_resource_utilization: ResourceUtilization to constraint the search of the mixed-precision configuration for the model.
         tb_w: TensorboardWriter object for logging
@@ -105,7 +105,7 @@ def core_runner(in_model: Any,
                                     target_resource_utilization,
                                     representative_data_gen,
                                     core_config,
-                                    tpc,
+                                    fqc,
                                     fw_info,
                                     fw_impl):
             core_config.mixed_precision_config.set_mixed_precision_enable()
@@ -116,7 +116,7 @@ def core_runner(in_model: Any,
                                      core_config.quantization_config,
                                      fw_info,
                                      fw_impl,
-                                     tpc,
+                                     fqc,
                                      core_config.bit_width_config,
                                      tb_w,
                                      mixed_precision_enable=core_config.is_mixed_precision_enabled,
@@ -138,7 +138,7 @@ def core_runner(in_model: Any,
     if core_config.is_mixed_precision_enabled:
         if core_config.mixed_precision_config.configuration_overwrite is None:
 
-            filter_candidates_for_mixed_precision(graph, target_resource_utilization, fw_info, tpc)
+            filter_candidates_for_mixed_precision(graph, target_resource_utilization, fw_info, fqc)
             bit_widths_config = search_bit_width(tg,
                                                  fw_info,
                                                  fw_impl,
diff --git a/model_compression_toolkit/gptq/keras/quantization_facade.py b/model_compression_toolkit/gptq/keras/quantization_facade.py
index feb9238a7..148663109 100644
--- a/model_compression_toolkit/gptq/keras/quantization_facade.py
+++ b/model_compression_toolkit/gptq/keras/quantization_facade.py
@@ -179,7 +179,7 @@ def keras_gradient_post_training_quantization(in_model: Model, representative_da
             gptq_representative_data_gen (Callable): Dataset used for GPTQ training. If None defaults to representative_data_gen
             target_resource_utilization (ResourceUtilization): ResourceUtilization object to limit the search of the mixed-precision configuration as desired.
             core_config (CoreConfig): Configuration object containing parameters of how the model should be quantized, including mixed precision parameters.
-            target_platform_capabilities (FrameworkQuantizationCapabilities): FrameworkQuantizationCapabilities to optimize the Keras model according to.
+            target_platform_capabilities (TargetPlatformCapabilities): TargetPlatformCapabilities to optimize the Keras model according to.
 
         Returns:
 
@@ -242,7 +242,7 @@ def keras_gradient_post_training_quantization(in_model: Model, representative_da
 
         # Attach tpc model to framework
         attach2keras = AttachTpcToKeras()
-        target_platform_capabilities = attach2keras.attach(
+        framework_platform_capabilities = attach2keras.attach(
             target_platform_capabilities,
             custom_opset2layer=core_config.quantization_config.custom_tpc_opset_to_layer)
 
@@ -251,7 +251,7 @@ def keras_gradient_post_training_quantization(in_model: Model, representative_da
                                                                                    core_config=core_config,
                                                                                    fw_info=DEFAULT_KERAS_INFO,
                                                                                    fw_impl=fw_impl,
-                                                                                   tpc=target_platform_capabilities,
+                                                                                   fqc=framework_platform_capabilities,
                                                                                    target_resource_utilization=target_resource_utilization,
                                                                                    tb_w=tb_w,
                                                                                    running_gptq=True)
@@ -279,9 +279,9 @@ def keras_gradient_post_training_quantization(in_model: Model, representative_da
                                         DEFAULT_KERAS_INFO)
 
         exportable_model, user_info = get_exportable_keras_model(tg_gptq)
-        if target_platform_capabilities.tp_model.add_metadata:
+        if framework_platform_capabilities.tp_model.add_metadata:
             exportable_model = add_metadata(exportable_model,
-                                            create_model_metadata(tpc=target_platform_capabilities,
+                                            create_model_metadata(fqc=framework_platform_capabilities,
                                                                   scheduling_info=scheduling_info))
         return exportable_model, user_info
 
diff --git a/model_compression_toolkit/gptq/pytorch/quantization_facade.py b/model_compression_toolkit/gptq/pytorch/quantization_facade.py
index 60c1bb59a..20158a003 100644
--- a/model_compression_toolkit/gptq/pytorch/quantization_facade.py
+++ b/model_compression_toolkit/gptq/pytorch/quantization_facade.py
@@ -168,7 +168,7 @@ def pytorch_gradient_post_training_quantization(model: Module,
             core_config (CoreConfig): Configuration object containing parameters of how the model should be quantized, including mixed precision parameters.
             gptq_config (GradientPTQConfig): Configuration for using gptq (e.g. optimizer).
             gptq_representative_data_gen (Callable): Dataset used for GPTQ training. If None defaults to representative_data_gen
-            target_platform_capabilities (FrameworkQuantizationCapabilities): FrameworkQuantizationCapabilities to optimize the PyTorch model according to.
+            target_platform_capabilities (TargetPlatformCapabilities): TargetPlatformCapabilities to optimize the PyTorch model according to.
 
         Returns:
             A quantized module and information the user may need to handle the quantized module.
@@ -215,7 +215,7 @@ def pytorch_gradient_post_training_quantization(model: Module,
 
         # Attach tpc model to framework
         attach2pytorch = AttachTpcToPytorch()
-        target_platform_capabilities = attach2pytorch.attach(target_platform_capabilities,
+        framework_quantization_capabilities = attach2pytorch.attach(target_platform_capabilities,
                                                              core_config.quantization_config.custom_tpc_opset_to_layer)
 
         # ---------------------- #
@@ -226,7 +226,7 @@ def pytorch_gradient_post_training_quantization(model: Module,
                                                                                       core_config=core_config,
                                                                                       fw_info=DEFAULT_PYTORCH_INFO,
                                                                                       fw_impl=fw_impl,
-                                                                                      tpc=target_platform_capabilities,
+                                                                                      fqc=framework_quantization_capabilities,
                                                                                       target_resource_utilization=target_resource_utilization,
                                                                                       tb_w=tb_w,
                                                                                       running_gptq=True)
@@ -255,9 +255,9 @@ def pytorch_gradient_post_training_quantization(model: Module,
                                         DEFAULT_PYTORCH_INFO)
 
         exportable_model, user_info = get_exportable_pytorch_model(graph_gptq)
-        if target_platform_capabilities.tp_model.add_metadata:
+        if framework_quantization_capabilities.tp_model.add_metadata:
             exportable_model = add_metadata(exportable_model,
-                                            create_model_metadata(tpc=target_platform_capabilities,
+                                            create_model_metadata(fqc=framework_quantization_capabilities,
                                                                   scheduling_info=scheduling_info))
         return exportable_model, user_info
 
diff --git a/model_compression_toolkit/metadata.py b/model_compression_toolkit/metadata.py
index 9e8400bb9..1bec3f90e 100644
--- a/model_compression_toolkit/metadata.py
+++ b/model_compression_toolkit/metadata.py
@@ -21,30 +21,30 @@
 from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 
 
-def create_model_metadata(tpc: FrameworkQuantizationCapabilities,
+def create_model_metadata(fqc: FrameworkQuantizationCapabilities,
                           scheduling_info: SchedulerInfo = None) -> Dict:
     """
     Creates and returns a metadata dictionary for the model, including version information
     and optional scheduling information.
 
     Args:
-        tpc: A TPC object to get the version.
+        fqc: A FQC object to get the version.
         scheduling_info: An object containing scheduling details and metadata. Default is None.
 
     Returns:
         Dict: A dictionary containing the model's version information and optional scheduling information.
     """
-    _metadata = get_versions_dict(tpc)
+    _metadata = get_versions_dict(fqc)
     if scheduling_info:
         scheduler_metadata = get_scheduler_metadata(scheduler_info=scheduling_info)
         _metadata['scheduling_info'] = scheduler_metadata
     return _metadata
 
 
-def get_versions_dict(tpc) -> Dict:
+def get_versions_dict(fqc) -> Dict:
     """
 
-    Returns: A dictionary with TPC, MCT and TPC-Schema versions.
+    Returns: A dictionary with FQC, MCT and FQC-Schema versions.
 
     """
     # imported inside to avoid circular import error
@@ -53,10 +53,10 @@ def get_versions_dict(tpc) -> Dict:
     @dataclass
     class TPCVersions:
         mct_version: str
-        tpc_minor_version: str = f'{tpc.tp_model.tpc_minor_version}'
-        tpc_patch_version: str = f'{tpc.tp_model.tpc_patch_version}'
-        tpc_platform_type: str = f'{tpc.tp_model.tpc_platform_type}'
-        tpc_schema: str = f'{tpc.tp_model.SCHEMA_VERSION}'
+        tpc_minor_version: str = f'{fqc.tp_model.tpc_minor_version}'
+        tpc_patch_version: str = f'{fqc.tp_model.tpc_patch_version}'
+        tpc_platform_type: str = f'{fqc.tp_model.tpc_platform_type}'
+        tpc_schema: str = f'{fqc.tp_model.SCHEMA_VERSION}'
 
     return asdict(TPCVersions(mct_version))
 
diff --git a/model_compression_toolkit/pruning/pytorch/pruning_facade.py b/model_compression_toolkit/pruning/pytorch/pruning_facade.py
index 6c5433c30..149f41a3c 100644
--- a/model_compression_toolkit/pruning/pytorch/pruning_facade.py
+++ b/model_compression_toolkit/pruning/pytorch/pruning_facade.py
@@ -67,7 +67,7 @@ def pytorch_pruning_experimental(model: Module,
             target_resource_utilization (ResourceUtilization): Key Performance Indicators specifying the pruning targets.
             representative_data_gen (Callable): A function to generate representative data for pruning analysis.
             pruning_config (PruningConfig): Configuration settings for the pruning process. Defaults to standard config.
-            target_platform_capabilities (FrameworkQuantizationCapabilities): Platform-specific constraints and capabilities.
+            target_platform_capabilities (TargetPlatformCapabilities): Platform-specific constraints and capabilities.
                 Defaults to DEFAULT_PYTORCH_TPC.
 
         Returns:
@@ -121,12 +121,12 @@ def pytorch_pruning_experimental(model: Module,
 
         # Attach TPC to framework
         attach2pytorch = AttachTpcToPytorch()
-        target_platform_capabilities = attach2pytorch.attach(target_platform_capabilities)
+        framework_platform_capabilities = attach2pytorch.attach(target_platform_capabilities)
 
         # Convert the original Pytorch model to an internal graph representation.
         float_graph = read_model_to_graph(model,
                                           representative_data_gen,
-                                          target_platform_capabilities,
+                                          framework_platform_capabilities,
                                           DEFAULT_PYTORCH_INFO,
                                           fw_impl)
 
@@ -143,7 +143,7 @@ def pytorch_pruning_experimental(model: Module,
                         target_resource_utilization,
                         representative_data_gen,
                         pruning_config,
-                        target_platform_capabilities)
+                        framework_platform_capabilities)
 
         # Apply the pruning process.
         pruned_graph = pruner.prune_graph()
diff --git a/model_compression_toolkit/ptq/keras/quantization_facade.py b/model_compression_toolkit/ptq/keras/quantization_facade.py
index 86dcd14ac..33fd9dd6e 100644
--- a/model_compression_toolkit/ptq/keras/quantization_facade.py
+++ b/model_compression_toolkit/ptq/keras/quantization_facade.py
@@ -71,7 +71,7 @@ def keras_post_training_quantization(in_model: Model,
              representative_data_gen (Callable): Dataset used for calibration.
              target_resource_utilization (ResourceUtilization): ResourceUtilization object to limit the search of the mixed-precision configuration as desired.
              core_config (CoreConfig): Configuration object containing parameters of how the model should be quantized, including mixed precision parameters.
-             target_platform_capabilities (FrameworkQuantizationCapabilities): FrameworkQuantizationCapabilities to optimize the Keras model according to.
+             target_platform_capabilities (TargetPlatformCapabilities): TargetPlatformCapabilities to optimize the Keras model according to.
 
          Returns:
 
@@ -139,7 +139,7 @@ def keras_post_training_quantization(in_model: Model,
         fw_impl = KerasImplementation()
 
         attach2keras = AttachTpcToKeras()
-        target_platform_capabilities = attach2keras.attach(
+        framework_platform_capabilities = attach2keras.attach(
             target_platform_capabilities,
             custom_opset2layer=core_config.quantization_config.custom_tpc_opset_to_layer)
 
@@ -149,7 +149,7 @@ def keras_post_training_quantization(in_model: Model,
                                                                 core_config=core_config,
                                                                 fw_info=fw_info,
                                                                 fw_impl=fw_impl,
-                                                                tpc=target_platform_capabilities,
+                                                                fqc=framework_platform_capabilities,
                                                                 target_resource_utilization=target_resource_utilization,
                                                                 tb_w=tb_w)
 
@@ -177,9 +177,9 @@ def keras_post_training_quantization(in_model: Model,
                                         fw_info)
 
         exportable_model, user_info = get_exportable_keras_model(graph_with_stats_correction)
-        if target_platform_capabilities.tp_model.add_metadata:
+        if framework_platform_capabilities.tp_model.add_metadata:
             exportable_model = add_metadata(exportable_model,
-                                            create_model_metadata(tpc=target_platform_capabilities,
+                                            create_model_metadata(fqc=framework_platform_capabilities,
                                                                   scheduling_info=scheduling_info))
         return exportable_model, user_info
 
diff --git a/model_compression_toolkit/ptq/pytorch/quantization_facade.py b/model_compression_toolkit/ptq/pytorch/quantization_facade.py
index 4c9df057f..2edb98567 100644
--- a/model_compression_toolkit/ptq/pytorch/quantization_facade.py
+++ b/model_compression_toolkit/ptq/pytorch/quantization_facade.py
@@ -68,7 +68,7 @@ def pytorch_post_training_quantization(in_module: Module,
             representative_data_gen (Callable): Dataset used for calibration.
             target_resource_utilization (ResourceUtilization): ResourceUtilization object to limit the search of the mixed-precision configuration as desired.
             core_config (CoreConfig): Configuration object containing parameters of how the model should be quantized, including mixed precision parameters.
-            target_platform_capabilities (FrameworkQuantizationCapabilities): FrameworkQuantizationCapabilities to optimize the PyTorch model according to.
+            target_platform_capabilities (TargetPlatformCapabilities): TargetPlatformCapabilities to optimize the PyTorch model according to.
 
         Returns:
             A quantized module and information the user may need to handle the quantized module.
@@ -112,7 +112,7 @@ def pytorch_post_training_quantization(in_module: Module,
 
         # Attach tpc model to framework
         attach2pytorch = AttachTpcToPytorch()
-        target_platform_capabilities = attach2pytorch.attach(target_platform_capabilities,
+        framework_platform_capabilities = attach2pytorch.attach(target_platform_capabilities,
                                                              core_config.quantization_config.custom_tpc_opset_to_layer)
 
         # Ignore hessian info service as it is not used here yet.
@@ -121,7 +121,7 @@ def pytorch_post_training_quantization(in_module: Module,
                                                                 core_config=core_config,
                                                                 fw_info=fw_info,
                                                                 fw_impl=fw_impl,
-                                                                tpc=target_platform_capabilities,
+                                                                fqc=framework_platform_capabilities,
                                                                 target_resource_utilization=target_resource_utilization,
                                                                 tb_w=tb_w)
 
@@ -149,9 +149,9 @@ def pytorch_post_training_quantization(in_module: Module,
                                         fw_info)
 
         exportable_model, user_info = get_exportable_pytorch_model(graph_with_stats_correction)
-        if target_platform_capabilities.tp_model.add_metadata:
+        if framework_platform_capabilities.tp_model.add_metadata:
             exportable_model = add_metadata(exportable_model,
-                                            create_model_metadata(tpc=target_platform_capabilities,
+                                            create_model_metadata(fqc=framework_platform_capabilities,
                                                                   scheduling_info=scheduling_info))
         return exportable_model, user_info
 
diff --git a/model_compression_toolkit/qat/keras/quantization_facade.py b/model_compression_toolkit/qat/keras/quantization_facade.py
index c953dfa55..a38600132 100644
--- a/model_compression_toolkit/qat/keras/quantization_facade.py
+++ b/model_compression_toolkit/qat/keras/quantization_facade.py
@@ -115,7 +115,7 @@ def keras_quantization_aware_training_init_experimental(in_model: Model,
              target_resource_utilization (ResourceUtilization): ResourceUtilization object to limit the search of the mixed-precision configuration as desired.
              core_config (CoreConfig): Configuration object containing parameters of how the model should be quantized, including mixed precision parameters.
              qat_config (QATConfig): QAT configuration
-             target_platform_capabilities (FrameworkQuantizationCapabilities): FrameworkQuantizationCapabilities to optimize the Keras model according to.
+             target_platform_capabilities (TargetPlatformCapabilities): TargetPlatformCapabilities to optimize the Keras model according to.
 
          Returns:
 
@@ -200,7 +200,7 @@ def keras_quantization_aware_training_init_experimental(in_model: Model,
                                                   core_config=core_config,
                                                   fw_info=DEFAULT_KERAS_INFO,
                                                   fw_impl=fw_impl,
-                                                  tpc=target_platform_capabilities,
+                                                  fqc=target_platform_capabilities,
                                                   target_resource_utilization=target_resource_utilization,
                                                   tb_w=tb_w)
 
diff --git a/model_compression_toolkit/qat/pytorch/quantization_facade.py b/model_compression_toolkit/qat/pytorch/quantization_facade.py
index e66cb9dd9..9cbce0609 100644
--- a/model_compression_toolkit/qat/pytorch/quantization_facade.py
+++ b/model_compression_toolkit/qat/pytorch/quantization_facade.py
@@ -104,7 +104,7 @@ def pytorch_quantization_aware_training_init_experimental(in_model: Module,
              target_resource_utilization (ResourceUtilization): ResourceUtilization object to limit the search of the mixed-precision configuration as desired.
              core_config (CoreConfig): Configuration object containing parameters of how the model should be quantized, including mixed precision parameters.
              qat_config (QATConfig): QAT configuration
-             target_platform_capabilities (FrameworkQuantizationCapabilities): FrameworkQuantizationCapabilities to optimize the Pytorch model according to.
+             target_platform_capabilities (TargetPlatformCapabilities): TargetPlatformCapabilities to optimize the Pytorch model according to.
 
          Returns:
 
@@ -159,7 +159,7 @@ def pytorch_quantization_aware_training_init_experimental(in_model: Module,
 
         # Attach tpc model to framework
         attach2pytorch = AttachTpcToPytorch()
-        target_platform_capabilities = attach2pytorch.attach(target_platform_capabilities,
+        framework_platform_capabilities = attach2pytorch.attach(target_platform_capabilities,
                                                              core_config.quantization_config.custom_tpc_opset_to_layer)
 
         # Ignore hessian scores service as we do not use it here
@@ -168,7 +168,7 @@ def pytorch_quantization_aware_training_init_experimental(in_model: Module,
                                                   core_config=core_config,
                                                   fw_info=DEFAULT_PYTORCH_INFO,
                                                   fw_impl=fw_impl,
-                                                  tpc=target_platform_capabilities,
+                                                  fqc=framework_platform_capabilities,
                                                   target_resource_utilization=target_resource_utilization,
                                                   tb_w=tb_w)
 
diff --git a/model_compression_toolkit/xquant/common/model_folding_utils.py b/model_compression_toolkit/xquant/common/model_folding_utils.py
index c9e92d228..8e923379e 100644
--- a/model_compression_toolkit/xquant/common/model_folding_utils.py
+++ b/model_compression_toolkit/xquant/common/model_folding_utils.py
@@ -35,19 +35,19 @@ class ModelFoldingUtils:
     def __init__(self,
                  fw_info: FrameworkInfo,
                  fw_impl: FrameworkImplementation,
-                 fw_default_tpc: FrameworkQuantizationCapabilities):
+                 fw_default_fqc: FrameworkQuantizationCapabilities):
         """
         Initialize the ModelFoldingUtils class with framework-specific information, implementation details,
-        and default TPC.
+        and default FQC.
 
         Args:
             fw_info: Framework-specific information.
             fw_impl: Implementation functions for the framework.
-            fw_default_tpc: Default target platform capabilities for the handled framework.
+            fw_default_fqc: Default target platform capabilities for the handled framework.
         """
         self.fw_info = fw_info
         self.fw_impl = fw_impl
-        self.fw_default_tpc = fw_default_tpc
+        self.fw_default_fqc = fw_default_fqc
 
     def create_float_folded_model(self, float_model: Any, representative_dataset: Any = None) -> Any:
         """
@@ -101,5 +101,5 @@ def create_float_folded_graph(self, model: Any, repr_dataset: Callable) -> Graph
                                          fw_impl=self.fw_impl,
                                          fw_info=self.fw_info,
                                          quantization_config=DEFAULTCONFIG,
-                                         tpc=self.fw_default_tpc)
+                                         fqc=self.fw_default_fqc)
         return graph
diff --git a/model_compression_toolkit/xquant/keras/keras_report_utils.py b/model_compression_toolkit/xquant/keras/keras_report_utils.py
index f62401d7e..73c249b6f 100644
--- a/model_compression_toolkit/xquant/keras/keras_report_utils.py
+++ b/model_compression_toolkit/xquant/keras/keras_report_utils.py
@@ -46,12 +46,12 @@ def __init__(self, report_dir: str):
         # Set the default Target Platform Capabilities (TPC) for Keras.
         default_tpc = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
         attach2pytorch = AttachTpcToKeras()
-        target_platform_capabilities = attach2pytorch.attach(default_tpc)
+        framework_quantization_capabilities = attach2pytorch.attach(default_tpc)

         dataset_utils = KerasDatasetUtils()
         model_folding = ModelFoldingUtils(fw_info=fw_info,
                                           fw_impl=fw_impl,
-                                          fw_default_tpc=target_platform_capabilities)
+                                          fw_default_fqc=framework_quantization_capabilities)
 
         similarity_calculator = SimilarityCalculator(dataset_utils=dataset_utils,
                                                      model_folding=model_folding,
diff --git a/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py b/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py
index 4145d5942..454b83f10 100644
--- a/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py
+++ b/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py
@@ -44,12 +44,12 @@ def __init__(self, report_dir: str):
         # Set the default Target Platform Capabilities (TPC) for PyTorch.
         default_tpc = get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
         attach2pytorch = AttachTpcToPytorch()
-        target_platform_capabilities = attach2pytorch.attach(default_tpc)
+        framework_quantization_capabilities = attach2pytorch.attach(default_tpc)
 
         dataset_utils = PytorchDatasetUtils()
         model_folding = ModelFoldingUtils(fw_info=fw_info,
                                           fw_impl=fw_impl,
-                                          fw_default_tpc=target_platform_capabilities)
+                                          fw_default_fqc=framework_quantization_capabilities)
 
         similarity_calculator = SimilarityCalculator(dataset_utils=dataset_utils,
                                                      model_folding=model_folding,
diff --git a/tests/common_tests/helpers/generate_test_tp_model.py b/tests/common_tests/helpers/generate_test_tp_model.py
index 8510f9a46..aee875bf5 100644
--- a/tests/common_tests/helpers/generate_test_tp_model.py
+++ b/tests/common_tests/helpers/generate_test_tp_model.py
@@ -166,13 +166,13 @@ def generate_custom_test_tp_model(name: str,
     return custom_tp_model
 
 
-def generate_test_tpc(name: str,
+def generate_test_fqc(name: str,
                       tp_model: schema.TargetPlatformCapabilities,
-                      base_tpc: tp.FrameworkQuantizationCapabilities,
+                      base_fqc: tp.FrameworkQuantizationCapabilities,
                       op_sets_to_layer_add: Dict[str, List[Any]] = None,
                       op_sets_to_layer_drop: Dict[str, List[Any]] = None,
                       attr_mapping: Dict[str, Dict] = {}):
-    op_set_to_layers_list = base_tpc.op_sets_to_layers.op_sets_to_layers
+    op_set_to_layers_list = base_fqc.op_sets_to_layers.op_sets_to_layers
     op_set_to_layers_dict = {op_set.name: op_set.layers for op_set in op_set_to_layers_list}
 
     merged_dict = copy.deepcopy(op_set_to_layers_dict)
@@ -189,14 +189,14 @@ def generate_test_tpc(name: str,
         # Remove empty op sets
         merged_dict = {op_set_name: layers for op_set_name, layers in merged_dict.items() if len(layers) == 0}
 
-    tpc = tp.FrameworkQuantizationCapabilities(tp_model)
+    fqc = tp.FrameworkQuantizationCapabilities(tp_model)
 
-    with tpc:
+    with fqc:
         for op_set_name, layers in merged_dict.items():
             am = attr_mapping.get(op_set_name)
             tp.OperationsSetToLayers(op_set_name, layers, attr_mapping=am)
 
-    return tpc
+    return fqc
 
 
 def generate_test_attr_configs(default_cfg_nbits: int = 8,
diff --git a/tests/common_tests/helpers/prep_graph_for_func_test.py b/tests/common_tests/helpers/prep_graph_for_func_test.py
index 5dfca2e06..fc0aaca1b 100644
--- a/tests/common_tests/helpers/prep_graph_for_func_test.py
+++ b/tests/common_tests/helpers/prep_graph_for_func_test.py
@@ -49,7 +49,7 @@ def prepare_graph_with_configs(in_model,
     _tp = generate_tp_model(default_config, base_config, op_cfg_list, "function_test")
     tpc = get_tpc_func("function_test", _tp)
 
-    tpc = attach2fw.attach(tpc, qc.custom_tpc_opset_to_layer)
+    fqc = attach2fw.attach(tpc, qc.custom_tpc_opset_to_layer)
 
     # Read Model
     graph = graph_preparation_runner(in_model,
@@ -57,7 +57,7 @@ def prepare_graph_with_configs(in_model,
                                      quantization_config=qc,
                                      fw_info=fw_info,
                                      fw_impl=fw_impl,
-                                     tpc=tpc,
+                                     fqc=fqc,
                                      mixed_precision_enable=mixed_precision_enabled,
                                      running_gptq=running_gptq)
 
@@ -106,7 +106,7 @@ def prepare_graph_set_bit_widths(in_model,
                                  fw_info,
                                  network_editor,
                                  analyze_similarity,
-                                 tpc,
+                                 fqc,
                                  mp_cfg):
 
     # Config
@@ -130,7 +130,7 @@ def _representative_data_gen():
                                      quantization_config=quant_config,
                                      fw_info=fw_info,
                                      fw_impl=fw_impl,
-                                     tpc=tpc,
+                                     fqc=fqc,
                                      bit_width_config=core_config.bit_width_config,
                                      mixed_precision_enable=core_config.is_mixed_precision_enabled)
 
diff --git a/tests/common_tests/test_tp_model.py b/tests/common_tests/test_tp_model.py
index 2e3670a93..6d7a084b4 100644
--- a/tests/common_tests/test_tp_model.py
+++ b/tests/common_tests/test_tp_model.py
@@ -274,7 +274,7 @@ def test_get_qco_for_none_tpc(self):
         mock_node = BaseNode(name="", framework_attr={}, input_shape=(), output_shape=(), weights={}, layer_class=None)
         with self.assertRaises(Exception) as e:
             mock_node.get_qco(None)
-        self.assertEqual('Can not retrieve QC options for None TPC', str(e.exception))
+        self.assertEqual('Can not retrieve QC options for None FQC', str(e.exception))
 
 
 class FusingTest(unittest.TestCase):
diff --git a/tests/external_tests/keras_tests/models_tests/test_networks_runner_float.py b/tests/external_tests/keras_tests/models_tests/test_networks_runner_float.py
index f8b6b11ab..e5ca3e3da 100644
--- a/tests/external_tests/keras_tests/models_tests/test_networks_runner_float.py
+++ b/tests/external_tests/keras_tests/models_tests/test_networks_runner_float.py
@@ -60,7 +60,7 @@ def run_network(self, inputs_list):
 
         graph = model_reader(self.model_float)  # model reading
         graph.set_fw_info(DEFAULT_KERAS_INFO)
-        graph.set_tpc(keras_default_tpc)
+        graph.set_fqc(keras_default_tpc)
         graph = set_quantization_configuration_to_graph(graph,
                                                         copy.deepcopy(DEFAULTCONFIG))
         ptq_model, _ = fw_impl.model_builder(graph,
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision/requires_mixed_precision_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision/requires_mixed_precision_test.py
index a82ddd149..f0bc7f6ed 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision/requires_mixed_precision_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision/requires_mixed_precision_test.py
@@ -69,12 +69,12 @@ def get_max_resources_for_model(self, model):
         tpc = self.get_tpc()
         cc = self.get_core_config()
         attach2keras = AttachTpcToKeras()
-        tpc = attach2keras.attach(tpc, cc.quantization_config.custom_tpc_opset_to_layer)
+        fqc = attach2keras.attach(tpc, cc.quantization_config.custom_tpc_opset_to_layer)
 
         return compute_resource_utilization_data(in_model=model,
                                                  representative_data_gen=self.representative_data_gen(),
                                                  core_config=cc,
-                                                 tpc=tpc,
+                                                 fqc=fqc,
                                                  fw_info=DEFAULT_KERAS_INFO,
                                                  fw_impl=KerasImplementation(),
                                                  transformed_graph=None,
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/edit_qc_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/edit_qc_test.py
index fb7408e70..a3dfbf5f0 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/edit_qc_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/edit_qc_test.py
@@ -51,14 +51,14 @@ def prepare_graph_for_first_network_editor(in_model, representative_data_gen, co
         core_config.mixed_precision_config.set_mixed_precision_enable()
 
     attach2keras = AttachTpcToKeras()
-    tpc = attach2keras.attach(tpc)
+    fqc = attach2keras.attach(tpc)
 
     transformed_graph = graph_preparation_runner(in_model,
                                                  representative_data_gen,
                                                  core_config.quantization_config,
                                                  fw_info,
                                                  fw_impl,
-                                                 tpc,
+                                                 fqc,
                                                  core_config.bit_width_config,
                                                  tb_w,
                                                  mixed_precision_enable=core_config.is_mixed_precision_enabled)
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/second_moment_correction_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/second_moment_correction_test.py
index aa48c883f..8885ff795 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/second_moment_correction_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/second_moment_correction_test.py
@@ -36,6 +36,7 @@
     keras_apply_second_moment_correction
 from model_compression_toolkit.core.runner import core_runner
 from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
 from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
@@ -268,7 +269,7 @@ def prepare_graph(self,
                       fw_info: FrameworkInfo = DEFAULT_KERAS_INFO,
                       network_editor: List[EditRule] = [],
                       analyze_similarity: bool = False,
-                      target_platform_capabilities: FrameworkQuantizationCapabilities = DEFAULT_KERAS_TPC) -> \
+                      target_platform_capabilities: TargetPlatformCapabilities = DEFAULT_KERAS_TPC) -> \
             Tuple[Graph, Graph]:
 
         KerasModelValidation(model=in_model,
@@ -284,7 +285,7 @@ def prepare_graph(self,
         fw_impl = KerasImplementation()
 
         attach2keras = AttachTpcToKeras()
-        target_platform_capabilities = attach2keras.attach(target_platform_capabilities)
+        framework_quantization_capabilities = attach2keras.attach(target_platform_capabilities)
 
         # Ignore initialized hessian service as it is not used here
         tg, bit_widths_config, _, _ = core_runner(in_model=in_model,
@@ -292,7 +293,7 @@ def prepare_graph(self,
                                                   core_config=core_config,
                                                   fw_info=fw_info,
                                                   fw_impl=fw_impl,
-                                                  tpc=target_platform_capabilities,
+                                                  fqc=framework_quantization_capabilities,
                                                   tb_w=tb_w)
         graph_to_apply_second_moment = copy.deepcopy(tg)
         semi_quantized_model = quantized_model_builder_for_second_moment_correction(graph_to_apply_second_moment,
diff --git a/tests/keras_tests/function_tests/test_activation_weights_composition_substitution.py b/tests/keras_tests/function_tests/test_activation_weights_composition_substitution.py
index f88f9f271..1cccf7553 100644
--- a/tests/keras_tests/function_tests/test_activation_weights_composition_substitution.py
+++ b/tests/keras_tests/function_tests/test_activation_weights_composition_substitution.py
@@ -110,10 +110,10 @@ def prepare_graph(in_model, keras_impl, mixed_precision_candidates_list, base_co
                                            name="activation_weights_composition_test")
 
     attach2keras = AttachTpcToKeras()
-    tpc = attach2keras.attach(tpc, qc.custom_tpc_opset_to_layer)
+    fqc = attach2keras.attach(tpc, qc.custom_tpc_opset_to_layer)
 
     graph.set_fw_info(fw_info)
-    graph.set_tpc(tpc)
+    graph.set_fqc(fqc)
 
     # Standard graph substitutions
     graph = substitute(graph, keras_impl.get_substitutions_prepare_graph())
@@ -125,7 +125,7 @@ def prepare_graph(in_model, keras_impl, mixed_precision_candidates_list, base_co
     graph = set_quantization_configuration_to_graph(graph=graph,
                                                     quant_config=qc,
                                                     mixed_precision_enable=True)
-    graph = fusion(graph, tpc)
+    graph = fusion(graph, fqc)
     graph = filter_nodes_candidates(graph)
 
     return graph
diff --git a/tests/keras_tests/function_tests/test_cfg_candidates_filter.py b/tests/keras_tests/function_tests/test_cfg_candidates_filter.py
index 421ee46ba..eeeb898e0 100644
--- a/tests/keras_tests/function_tests/test_cfg_candidates_filter.py
+++ b/tests/keras_tests/function_tests/test_cfg_candidates_filter.py
@@ -52,14 +52,14 @@ def prepare_graph(in_model, base_config, default_config, bitwidth_candidates):
     graph = keras_impl.model_reader(in_model, None)  # model reading
 
     attach2keras = AttachTpcToKeras()
-    tpc = attach2keras.attach(tpc, custom_opset2layer={"Input": CustomOpsetLayers([InputLayer])})
+    fqc = attach2keras.attach(tpc, custom_opset2layer={"Input": CustomOpsetLayers([InputLayer])})
 
-    graph.set_tpc(tpc)
+    graph.set_fqc(fqc)
     graph.set_fw_info(fw_info)
     graph = set_quantization_configuration_to_graph(graph=graph,
                                                     quant_config=mct.core.QuantizationConfig(),
                                                     mixed_precision_enable=True)
-    graph = fusion(graph, tpc)
+    graph = fusion(graph, fqc)
 
     return graph
 
diff --git a/tests/keras_tests/function_tests/test_sensitivity_metric_interest_points.py b/tests/keras_tests/function_tests/test_sensitivity_metric_interest_points.py
index 5feb09f2a..e46b29e33 100644
--- a/tests/keras_tests/function_tests/test_sensitivity_metric_interest_points.py
+++ b/tests/keras_tests/function_tests/test_sensitivity_metric_interest_points.py
@@ -67,9 +67,9 @@ def dummy_representative_dataset():
                                                                       c.activation_n_bits) for c in mixed_precision_cfg_list],
                                         name="sem_test")
 
-    tpc = AttachTpcToKeras().attach(tpc, custom_opset2layer={"Input": ([InputLayer],)})
+    fqc = AttachTpcToKeras().attach(tpc, custom_opset2layer={"Input": ([InputLayer],)})
 
-    graph.set_tpc(tpc)
+    graph.set_fqc(fqc)
     graph = set_quantization_configuration_to_graph(graph=graph,
                                                     quant_config=DEFAULTCONFIG,
                                                     mixed_precision_enable=True)
diff --git a/tests/keras_tests/function_tests/test_unsupported_custom_layer.py b/tests/keras_tests/function_tests/test_unsupported_custom_layer.py
index 05972fef3..30d8cb212 100644
--- a/tests/keras_tests/function_tests/test_unsupported_custom_layer.py
+++ b/tests/keras_tests/function_tests/test_unsupported_custom_layer.py
@@ -40,7 +40,7 @@ def test_raised_error_with_custom_layer(self):
         model = keras.Model(inputs=inputs, outputs=x)
 
         expected_error = f"MCT does not support optimizing Keras custom layers. Found a layer of type <class 'test_unsupported_custom_layer.CustomIdentity'>. " \
-                         f" Please add the custom layer to Target Platform Capabilities (TPC), or file a feature request or an issue if you believe this should be supported."
+                         f" Please add the custom layer to Framework Quantization Capabilities (FQC), or file a feature request or an issue if you believe this should be supported."
 
         def rep_dataset():
             yield [np.random.randn(1, 3, 3, 3)]
diff --git a/tests/keras_tests/non_parallel_tests/test_lp_search_bitwidth.py b/tests/keras_tests/non_parallel_tests/test_lp_search_bitwidth.py
index 85c41581c..af560beaf 100644
--- a/tests/keras_tests/non_parallel_tests/test_lp_search_bitwidth.py
+++ b/tests/keras_tests/non_parallel_tests/test_lp_search_bitwidth.py
@@ -233,10 +233,10 @@ def dummy_representative_dataset():
 
         graph = keras_impl.model_reader(in_model, dummy_representative_dataset)  # model reading
 
-        tpc = AttachTpcToKeras().attach(tpc)
+        fqc = AttachTpcToKeras().attach(tpc)
 
         graph.set_fw_info(fw_info)
-        graph.set_tpc(tpc)
+        graph.set_fqc(fqc)
         graph = set_quantization_configuration_to_graph(graph=graph,
                                                         quant_config=core_config.quantization_config,
                                                         mixed_precision_enable=True)
diff --git a/tests/keras_tests/non_parallel_tests/test_tensorboard_writer.py b/tests/keras_tests/non_parallel_tests/test_tensorboard_writer.py
index 987d6f5b6..4d2d16aaa 100644
--- a/tests/keras_tests/non_parallel_tests/test_tensorboard_writer.py
+++ b/tests/keras_tests/non_parallel_tests/test_tensorboard_writer.py
@@ -146,7 +146,7 @@ def plot_tensor_sizes(self, core_config):
                                          (4, 8), (4, 4), (4, 2),
                                          (2, 8), (2, 4), (2, 2)])
         tpc = generate_keras_tpc(name='mp_keras_tpc', tp_model=tpc_model)
-        tpc =AttachTpcToKeras().attach(tpc, core_config.quantization_config.custom_tpc_opset_to_layer)
+        fqc = AttachTpcToKeras().attach(tpc, core_config.quantization_config.custom_tpc_opset_to_layer)
 
         # Hessian service assumes core should be initialized. This test does not do it, so we disable the use of hessians in MP
         cfg = mct.core.DEFAULTCONFIG
@@ -159,7 +159,7 @@ def plot_tensor_sizes(self, core_config):
                                           fw_impl=KerasImplementation(),
                                           fw_info=DEFAULT_KERAS_INFO,
                                           representative_data_gen=random_datagen,
-                                          tpc=tpc,
+                                          fqc=fqc,
                                           network_editor=[],
                                           quant_config=cfg,
                                           target_resource_utilization=mct.core.ResourceUtilization(),
diff --git a/tests/keras_tests/tpc_keras.py b/tests/keras_tests/tpc_keras.py
index 08055fe35..374eec308 100644
--- a/tests/keras_tests/tpc_keras.py
+++ b/tests/keras_tests/tpc_keras.py
@@ -29,7 +29,7 @@
 import model_compression_toolkit as mct
 
 from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model, \
-    generate_mixed_precision_test_tp_model, generate_tp_model_with_activation_mp, generate_test_tpc
+    generate_mixed_precision_test_tp_model, generate_tp_model_with_activation_mp
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 
 tp = mct.target_platform

From 2c49b8920eb968d7e26b95d4b1a3209a27d9a589 Mon Sep 17 00:00:00 2001
From: liord <lior.dikstein@altair-semi.com>
Date: Sun, 12 Jan 2025 14:08:53 +0200
Subject: [PATCH 06/18] Rename tp_model->tpc

---
 .github/labeler.yml                           |  2 +-
 .../core/common/graph/base_node.py            |  2 +-
 .../keras/resource_utilization_data_facade.py |  4 +-
 .../resource_utilization_data_facade.py       |  4 +-
 .../gptq/keras/quantization_facade.py         |  6 +-
 .../gptq/pytorch/quantization_facade.py       |  8 +--
 model_compression_toolkit/metadata.py         |  8 +--
 .../pruning/keras/pruning_facade.py           |  4 +-
 .../pruning/pytorch/pruning_facade.py         |  4 +-
 .../ptq/keras/quantization_facade.py          |  6 +-
 .../ptq/pytorch/quantization_facade.py        |  6 +-
 .../qat/keras/quantization_facade.py          |  6 +-
 .../qat/pytorch/quantization_facade.py        |  4 +-
 .../target_platform_capabilities/README.md    |  4 +-
 .../target_platform_capabilities/constants.py |  8 +--
 .../schema/schema_functions.py                | 30 ++++----
 .../target_platform_capabilities/schema/v1.py |  2 +-
 .../framework_quantization_capabilities.py    | 40 +++++------
 .../operations_to_layers.py                   |  2 +-
 .../tpc_io_handler.py                         | 24 +++----
 .../get_target_platform_capabilities.py       | 32 ++++-----
 .../tpc_models/imx500_tpc/latest/__init__.py  |  6 +-
 .../imx500_tpc/v1/{tp_model.py => tpc.py}     | 24 +++----
 .../tpc_models/qnnpack_tpc/latest/__init__.py |  6 +-
 .../qnnpack_tpc/v1/{tp_model.py => tpc.py}    | 24 +++----
 .../tpc_models/tflite_tpc/latest/__init__.py  |  8 +--
 .../tflite_tpc/v1/{tp_model.py => tpc.py}     | 24 +++----
 .../xquant/keras/keras_report_utils.py        |  4 +-
 .../xquant/pytorch/pytorch_report_utils.py    |  4 +-
 ..._test_tp_model.py => generate_test_tpc.py} | 68 +++++++++----------
 .../helpers/prep_graph_for_func_test.py       |  4 +-
 .../tpcs_for_tests/v1/{tp_model.py => tpc.py} | 30 ++++----
 .../v1_lut/{tp_model.py => tpc.py}            | 28 ++++----
 .../v1_pot/{tp_model.py => tpc.py}            | 28 ++++----
 .../tpcs_for_tests/v2/{tp_model.py => tpc.py} | 30 ++++----
 .../v2_lut/{tp_model.py => tpc.py}            | 28 ++++----
 .../tpcs_for_tests/v3/{tp_model.py => tpc.py} | 30 ++++----
 .../v3_lut/{tp_model.py => tpc.py}            | 28 ++++----
 .../tpcs_for_tests/v4/{tp_model.py => tpc.py} | 30 ++++----
 .../{test_tp_model.py => test_tpc.py}         | 36 +++++-----
 .../models_tests/test_networks_runner.py      |  6 +-
 .../test_networks_runner_float.py             |  4 +-
 .../test_sony_ssd_postprocess_layer.py        |  4 +-
 .../keras_fake_quant_exporter_base_test.py    |  4 +-
 .../keras_fake_quant/networks/conv2d_test.py  | 10 +--
 .../networks/conv2dtranspose_test.py          |  6 +-
 .../keras_fake_quant/networks/dense_test.py   |  6 +-
 .../networks/dwconv2d_test.py                 |  6 +-
 .../networks/multiple_inputs_test.py          |  6 +-
 .../networks/no_quant_test.py                 |  6 +-
 .../keras_mctq/networks/conv2d_test.py        | 10 +--
 .../networks/conv2dtranspose_test.py          |  6 +-
 .../keras_mctq/networks/dense_test.py         |  6 +-
 .../keras_mctq/networks/dwconv2d_test.py      |  6 +-
 .../networks/multiple_inputs_test.py          |  6 +-
 .../keras_mctq/networks/no_quant_test.py      |  6 +-
 .../tflite_fake_quant/networks/conv2d_test.py | 10 +--
 .../tflite_fake_quant/networks/dense_test.py  |  6 +-
 .../tflite_fake_quant_exporter_base_test.py   |  4 +-
 ...00_int8_tp_model.py => imx500_int8_tpc.py} | 22 +++---
 .../tflite_int8/networks/conv2d_test.py       |  2 +-
 .../tflite_int8/networks/mobilenetv2_test.py  |  2 +-
 .../tflite_int8_exporter_base_test.py         |  2 +-
 .../base_keras_feature_test.py                |  4 +-
 .../feature_networks/activation_16bit_test.py | 18 ++---
 .../bn_attributes_quantization_test.py        |  2 +-
 .../feature_networks/bn_folding_test.py       | 10 +--
 .../feature_networks/compute_max_cut_test.py  |  6 +-
 .../const_quantization_test.py                | 26 +++----
 .../const_representation_test.py              | 18 ++---
 .../conv_func_substitutions_test.py           |  8 +--
 .../linear_collapsing_test.py                 |  6 +-
 .../feature_networks/lut_quantizer.py         | 12 ++--
 .../feature_networks/manual_bit_selection.py  | 18 ++---
 .../matmul_substitution_test.py               |  6 +-
 .../feature_networks/metadata_test.py         |  6 +-
 .../requires_mixed_precision_test.py          |  2 +-
 .../feature_networks/mixed_precision_tests.py |  8 +--
 .../network_editor/node_filter_test.py        | 14 ++--
 .../per_tensor_weight_quantization_test.py    |  6 +-
 .../residual_collapsing_test.py               |  6 +-
 .../reused_layer_mixed_precision_test.py      |  2 +-
 .../second_moment_correction_test.py          | 18 ++---
 .../sigmoid_mul_substitution_test.py          |  6 +-
 ...ric_threshold_selection_activation_test.py |  6 +-
 .../feature_networks/test_kmeans_quantizer.py |  6 +-
 .../feature_networks/tpc_test.py              |  4 +-
 ...uniform_range_selection_activation_test.py |  6 +-
 .../weights_mixed_precision_tests.py          | 10 +--
 .../test_features_runner.py                   |  6 +-
 ...vation_weights_composition_substitution.py |  2 +-
 .../test_cfg_candidates_filter.py             |  2 +-
 .../function_tests/test_custom_layer.py       |  6 +-
 ...test_export_keras_fully_quantized_model.py |  4 +-
 .../function_tests/test_get_gptq_config.py    | 10 +--
 .../function_tests/test_hmse_error_method.py  |  8 +--
 ...st_kl_error_quantization_configurations.py | 10 +--
 .../test_quant_config_filtering.py            | 10 +--
 .../test_quantization_configurations.py       | 10 +--
 .../test_resource_utilization_data.py         |  2 +-
 .../test_set_layer_to_bitwidth.py             |  2 +-
 ...t_symmetric_threshold_selection_weights.py |  6 +-
 .../test_uniform_range_selection_weights.py   |  6 +-
 .../layer_tests/base_keras_layer_test.py      |  6 +-
 ...st_keras_tp_model.py => test_keras_tpc.py} | 20 +++---
 .../test_tensorboard_writer.py                |  8 +--
 .../conv2d_conv2dtranspose_pruning_test.py    |  6 +-
 .../networks_tests/conv2d_pruning_test.py     |  6 +-
 .../conv2dtranspose_conv2d_pruning_test.py    |  6 +-
 .../conv2dtranspose_pruning_test.py           |  6 +-
 .../networks_tests/dense_pruning_test.py      |  6 +-
 tests/keras_tests/tpc_keras.py                | 46 ++++++-------
 .../xquant_tests/test_xquant_end2end.py       |  6 +-
 .../base_pytorch_export_test.py               |  6 +-
 ...st_export_lut_symmetric_onnx_quantizers.py | 10 +--
 .../test_export_pot_onnx_quantizers.py        |  6 +-
 .../test_export_symmetric_onnx_quantizers.py  |  6 +-
 .../test_export_uniform_onnx_quantizers.py    |  6 +-
 .../test_exporting_qat_models.py              |  6 +-
 .../function_tests/get_gptq_config_test.py    |  6 +-
 .../resource_utilization_data_test.py         |  4 +-
 .../set_layer_to_bitwidth_test.py             |  6 +-
 ...st_export_pytorch_fully_quantized_model.py | 10 +--
 ...ytorch_tp_model.py => test_pytorch_tpc.py} | 20 +++---
 .../test_quant_config_filtering.py            | 10 +--
 .../test_quantization_configurations.py       | 10 +--
 .../layer_tests/base_pytorch_layer_test.py    | 10 +--
 .../model_tests/base_pytorch_feature_test.py  |  4 +-
 .../model_tests/base_pytorch_test.py          | 14 ++--
 .../feature_models/activation_16bit_test.py   | 18 ++---
 .../bn_attributes_quantization_test.py        |  2 +-
 .../feature_models/compute_max_cut_test.py    |  6 +-
 .../feature_models/const_quantization_test.py | 10 +--
 .../const_representation_test.py              | 14 ++--
 .../constant_conv_substitution_test.py        |  6 +-
 .../model_tests/feature_models/gptq_test.py   |  4 +-
 .../feature_models/linear_collapsing_test.py  |  6 +-
 .../feature_models/lut_quantizer_test.py      |  6 +-
 .../feature_models/manual_bit_selection.py    | 16 ++---
 .../feature_models/metadata_test.py           |  6 +-
 .../mixed_precision_activation_test.py        | 14 ++--
 .../mixed_precision_bops_test.py              |  4 +-
 .../mixed_precision_weights_test.py           | 16 ++---
 .../permute_substitution_test.py              |  6 +-
 .../model_tests/feature_models/qat_test.py    | 12 ++--
 .../feature_models/relu_bound_test.py         |  6 +-
 .../reshape_substitution_test.py              |  6 +-
 .../residual_collapsing_test.py               |  6 +-
 .../feature_models/scale_equalization_test.py |  4 +-
 .../second_moment_correction_test.py          | 14 ++--
 .../shift_negative_activation_test.py         |  4 +-
 .../symmetric_activation_test.py              |  6 +-
 .../feature_models/test_softmax_shift.py      |  4 +-
 .../model_tests/feature_models/tpc_test.py    |  6 +-
 .../feature_models/uniform_activation_test.py |  6 +-
 .../model_tests/test_feature_models_runner.py |  6 +-
 .../network_tests/conv2d_pruning_test.py      |  2 +-
 .../conv2dtranspose_conv2d_pruning_test.py    |  2 +-
 .../conv2dtranspose_pruning_test.py           |  2 +-
 .../network_tests/linear_pruning_test.py      |  2 +-
 .../pruning_pytorch_feature_test.py           |  6 +-
 tests/pytorch_tests/tpc_pytorch.py            | 10 +--
 .../xquant_tests/test_xquant_end2end.py       |  6 +-
 tests/test_suite.py                           |  6 +-
 164 files changed, 805 insertions(+), 805 deletions(-)
 rename model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/{tp_model.py => tpc.py} (95%)
 rename model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/{tp_model.py => tpc.py} (92%)
 rename model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/{tp_model.py => tpc.py} (94%)
 rename tests/common_tests/helpers/{generate_test_tp_model.py => generate_test_tpc.py} (84%)
 rename tests/common_tests/helpers/tpcs_for_tests/v1/{tp_model.py => tpc.py} (94%)
 rename tests/common_tests/helpers/tpcs_for_tests/v1_lut/{tp_model.py => tpc.py} (95%)
 rename tests/common_tests/helpers/tpcs_for_tests/v1_pot/{tp_model.py => tpc.py} (95%)
 rename tests/common_tests/helpers/tpcs_for_tests/v2/{tp_model.py => tpc.py} (95%)
 rename tests/common_tests/helpers/tpcs_for_tests/v2_lut/{tp_model.py => tpc.py} (95%)
 rename tests/common_tests/helpers/tpcs_for_tests/v3/{tp_model.py => tpc.py} (95%)
 rename tests/common_tests/helpers/tpcs_for_tests/v3_lut/{tp_model.py => tpc.py} (95%)
 rename tests/common_tests/helpers/tpcs_for_tests/v4/{tp_model.py => tpc.py} (96%)
 rename tests/common_tests/{test_tp_model.py => test_tpc.py} (92%)
 rename tests/keras_tests/exporter_tests/tflite_int8/{imx500_int8_tp_model.py => imx500_int8_tpc.py} (89%)
 rename tests/keras_tests/non_parallel_tests/{test_keras_tp_model.py => test_keras_tpc.py} (97%)
 rename tests/pytorch_tests/function_tests/{test_pytorch_tp_model.py => test_pytorch_tpc.py} (97%)

diff --git a/.github/labeler.yml b/.github/labeler.yml
index ce0db3416..3472296f0 100644
--- a/.github/labeler.yml
+++ b/.github/labeler.yml
@@ -27,7 +27,7 @@ auto:qat:
 - model_compression_toolkit/qat/**
 
 auto:target_platform_capabilities:
-- model_compression_toolkit/target_platform_capabilities/**
+- model_compression_toolkit/framework_quantization_capabilities/**
 
 auto:trainable_infrastructure:
 - model_compression_toolkit/trainable_infrastructure/**
diff --git a/model_compression_toolkit/core/common/graph/base_node.py b/model_compression_toolkit/core/common/graph/base_node.py
index 5b6b1e0e8..c246fb147 100644
--- a/model_compression_toolkit/core/common/graph/base_node.py
+++ b/model_compression_toolkit/core/common/graph/base_node.py
@@ -561,7 +561,7 @@ def get_qco(self, fqc: FrameworkQuantizationCapabilities) -> QuantizationConfigO
                 return matching_qcos[0]
             else:
                 Logger.critical(f"Found duplicate qco types for node '{self.name}' of type '{self.type}'!")  # pragma: no cover
-        return fqc.tp_model.default_qco
+        return fqc.tpc.default_qco
 
     def filter_node_qco_by_graph(self, fqc: FrameworkQuantizationCapabilities,
                                  next_nodes: List, node_qc_options: QuantizationConfigOptions
diff --git a/model_compression_toolkit/core/keras/resource_utilization_data_facade.py b/model_compression_toolkit/core/keras/resource_utilization_data_facade.py
index 493007d44..4ddfe75a8 100644
--- a/model_compression_toolkit/core/keras/resource_utilization_data_facade.py
+++ b/model_compression_toolkit/core/keras/resource_utilization_data_facade.py
@@ -24,7 +24,7 @@
 from model_compression_toolkit.verify_packages import FOUND_TF
 
 if FOUND_TF:
-    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
     from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
     from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
     from tensorflow.keras.models import Model
@@ -33,7 +33,7 @@
 
     from model_compression_toolkit import get_target_platform_capabilities
 
-    KERAS_DEFAULT_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
+    KERAS_DEFAULT_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
 
     def keras_resource_utilization_data(in_model: Model,
                                         representative_data_gen: Callable,
diff --git a/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py b/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py
index ef5dc0a13..ff130265f 100644
--- a/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py
+++ b/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py
@@ -23,7 +23,7 @@
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization_data import compute_resource_utilization_data
 from model_compression_toolkit.core.common.quantization.core_config import CoreConfig
 from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import MixedPrecisionQuantizationConfig
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
 from model_compression_toolkit.verify_packages import FOUND_TORCH
 
 if FOUND_TORCH:
@@ -35,7 +35,7 @@
 
     from model_compression_toolkit import get_target_platform_capabilities
 
-    PYTORCH_DEFAULT_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
+    PYTORCH_DEFAULT_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TPC)
 
 
     def pytorch_resource_utilization_data(in_model: Module,
diff --git a/model_compression_toolkit/gptq/keras/quantization_facade.py b/model_compression_toolkit/gptq/keras/quantization_facade.py
index 148663109..27f6e3d04 100644
--- a/model_compression_toolkit/gptq/keras/quantization_facade.py
+++ b/model_compression_toolkit/gptq/keras/quantization_facade.py
@@ -44,7 +44,7 @@
     from model_compression_toolkit.core.keras.keras_model_validation import KerasModelValidation
     from tensorflow.keras.models import Model
     from model_compression_toolkit.gptq.keras.gptq_loss import GPTQMultipleTensorsLoss, sample_layer_attention_loss
-    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
     from model_compression_toolkit.exporter.model_wrapper import get_exportable_keras_model
     from model_compression_toolkit import get_target_platform_capabilities
     from mct_quantizers.keras.metadata import add_metadata
@@ -59,7 +59,7 @@
     else:
         from tensorflow.python.keras.optimizer_v2.optimizer_v2 import OptimizerV2
 
-    DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
+    DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
 
     def get_keras_gptq_config(n_epochs: int,
                               optimizer: OptimizerV2 = None,
@@ -279,7 +279,7 @@ def keras_gradient_post_training_quantization(in_model: Model, representative_da
                                         DEFAULT_KERAS_INFO)
 
         exportable_model, user_info = get_exportable_keras_model(tg_gptq)
-        if framework_platform_capabilities.tp_model.add_metadata:
+        if framework_platform_capabilities.tpc.add_metadata:
             exportable_model = add_metadata(exportable_model,
                                             create_model_metadata(fqc=framework_platform_capabilities,
                                                                   scheduling_info=scheduling_info))
diff --git a/model_compression_toolkit/gptq/pytorch/quantization_facade.py b/model_compression_toolkit/gptq/pytorch/quantization_facade.py
index 20158a003..71df74a0b 100644
--- a/model_compression_toolkit/gptq/pytorch/quantization_facade.py
+++ b/model_compression_toolkit/gptq/pytorch/quantization_facade.py
@@ -40,7 +40,7 @@
 if FOUND_TORCH:
     from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
     from model_compression_toolkit.gptq.pytorch.gptq_pytorch_implementation import GPTQPytorchImplemantation
-    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
     from model_compression_toolkit.gptq.pytorch.gptq_loss import multiple_tensors_mse_loss, sample_layer_attention_loss
     from model_compression_toolkit.exporter.model_wrapper.pytorch.builder.fully_quantized_model_builder import get_exportable_pytorch_model
     import torch
@@ -51,7 +51,7 @@
     from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2pytorch import \
         AttachTpcToPytorch
 
-    DEFAULT_PYTORCH_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
+    DEFAULT_PYTORCH_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TPC)
 
     def get_pytorch_gptq_config(n_epochs: int,
                                 optimizer: Optimizer = None,
@@ -255,9 +255,9 @@ def pytorch_gradient_post_training_quantization(model: Module,
                                         DEFAULT_PYTORCH_INFO)
 
         exportable_model, user_info = get_exportable_pytorch_model(graph_gptq)
-        if framework_quantization_capabilities.tp_model.add_metadata:
+        if framework_quantization_capabilities.tpc.add_metadata:
             exportable_model = add_metadata(exportable_model,
-                                            create_model_metadata(fqc=target_platform_capabilities,
+                                            create_model_metadata(fqc=framework_quantization_capabilities,
                                                                   scheduling_info=scheduling_info))
         return exportable_model, user_info
 
diff --git a/model_compression_toolkit/metadata.py b/model_compression_toolkit/metadata.py
index 1bec3f90e..f223c5f29 100644
--- a/model_compression_toolkit/metadata.py
+++ b/model_compression_toolkit/metadata.py
@@ -53,10 +53,10 @@ def get_versions_dict(fqc) -> Dict:
     @dataclass
     class TPCVersions:
         mct_version: str
-        tpc_minor_version: str = f'{fqc.tp_model.tpc_minor_version}'
-        tpc_patch_version: str = f'{fqc.tp_model.tpc_patch_version}'
-        tpc_platform_type: str = f'{fqc.tp_model.tpc_platform_type}'
-        tpc_schema: str = f'{fqc.tp_model.SCHEMA_VERSION}'
+        tpc_minor_version: str = f'{fqc.tpc.tpc_minor_version}'
+        tpc_patch_version: str = f'{fqc.tpc.tpc_patch_version}'
+        tpc_platform_type: str = f'{fqc.tpc.tpc_platform_type}'
+        tpc_schema: str = f'{fqc.tpc.SCHEMA_VERSION}'
 
     return asdict(TPCVersions(mct_version))
 
diff --git a/model_compression_toolkit/pruning/keras/pruning_facade.py b/model_compression_toolkit/pruning/keras/pruning_facade.py
index beda9c3f2..8fee1abcd 100644
--- a/model_compression_toolkit/pruning/keras/pruning_facade.py
+++ b/model_compression_toolkit/pruning/keras/pruning_facade.py
@@ -28,7 +28,7 @@
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.common.quantization.quantization_config import DEFAULTCONFIG
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
 
 if FOUND_TF:
     from model_compression_toolkit.core.keras.back2framework.float_model_builder import FloatKerasModelBuilder
@@ -38,7 +38,7 @@
     from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
         AttachTpcToKeras
 
-    DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
+    DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
 
     def keras_pruning_experimental(model: Model,
                                    target_resource_utilization: ResourceUtilization,
diff --git a/model_compression_toolkit/pruning/pytorch/pruning_facade.py b/model_compression_toolkit/pruning/pytorch/pruning_facade.py
index 149f41a3c..47bc15df2 100644
--- a/model_compression_toolkit/pruning/pytorch/pruning_facade.py
+++ b/model_compression_toolkit/pruning/pytorch/pruning_facade.py
@@ -27,7 +27,7 @@
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.common.quantization.quantization_config import DEFAULTCONFIG
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
 
 
 # Check if PyTorch is available in the environment.
@@ -42,7 +42,7 @@
         AttachTpcToPytorch
 
     # Set the default Target Platform Capabilities (TPC) for PyTorch.
-    DEFAULT_PYOTRCH_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
+    DEFAULT_PYOTRCH_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TPC)
 
     def pytorch_pruning_experimental(model: Module,
                                      target_resource_utilization: ResourceUtilization,
diff --git a/model_compression_toolkit/ptq/keras/quantization_facade.py b/model_compression_toolkit/ptq/keras/quantization_facade.py
index 33fd9dd6e..2712d8f47 100644
--- a/model_compression_toolkit/ptq/keras/quantization_facade.py
+++ b/model_compression_toolkit/ptq/keras/quantization_facade.py
@@ -37,7 +37,7 @@
     from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
     from model_compression_toolkit.core.keras.keras_model_validation import KerasModelValidation
     from tensorflow.keras.models import Model
-    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
     from model_compression_toolkit.exporter.model_wrapper import get_exportable_keras_model
 
     from model_compression_toolkit import get_target_platform_capabilities
@@ -45,7 +45,7 @@
     from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
         AttachTpcToKeras
 
-    DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
+    DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
 
 
     def keras_post_training_quantization(in_model: Model,
@@ -177,7 +177,7 @@ def keras_post_training_quantization(in_model: Model,
                                         fw_info)
 
         exportable_model, user_info = get_exportable_keras_model(graph_with_stats_correction)
-        if framework_platform_capabilities.tp_model.add_metadata:
+        if framework_platform_capabilities.tpc.add_metadata:
             exportable_model = add_metadata(exportable_model,
                                             create_model_metadata(fqc=framework_platform_capabilities,
                                                                   scheduling_info=scheduling_info))
diff --git a/model_compression_toolkit/ptq/pytorch/quantization_facade.py b/model_compression_toolkit/ptq/pytorch/quantization_facade.py
index 2edb98567..506bbabea 100644
--- a/model_compression_toolkit/ptq/pytorch/quantization_facade.py
+++ b/model_compression_toolkit/ptq/pytorch/quantization_facade.py
@@ -35,7 +35,7 @@
 if FOUND_TORCH:
     from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
     from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
-    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
     from torch.nn import Module
     from model_compression_toolkit.exporter.model_wrapper.pytorch.builder.fully_quantized_model_builder import get_exportable_pytorch_model
     from model_compression_toolkit import get_target_platform_capabilities
@@ -43,7 +43,7 @@
     from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2pytorch import \
         AttachTpcToPytorch
 
-    DEFAULT_PYTORCH_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
+    DEFAULT_PYTORCH_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TPC)
 
     def pytorch_post_training_quantization(in_module: Module,
                                            representative_data_gen: Callable,
@@ -149,7 +149,7 @@ def pytorch_post_training_quantization(in_module: Module,
                                         fw_info)
 
         exportable_model, user_info = get_exportable_pytorch_model(graph_with_stats_correction)
-        if framework_platform_capabilities.tp_model.add_metadata:
+        if framework_platform_capabilities.tpc.add_metadata:
             exportable_model = add_metadata(exportable_model,
                                             create_model_metadata(fqc=framework_platform_capabilities,
                                                                   scheduling_info=scheduling_info))
diff --git a/model_compression_toolkit/qat/keras/quantization_facade.py b/model_compression_toolkit/qat/keras/quantization_facade.py
index a38600132..633d0a568 100644
--- a/model_compression_toolkit/qat/keras/quantization_facade.py
+++ b/model_compression_toolkit/qat/keras/quantization_facade.py
@@ -38,7 +38,7 @@
     from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
     from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
     from model_compression_toolkit.core.keras.keras_model_validation import KerasModelValidation
-    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
 
     from model_compression_toolkit.core.keras.back2framework.keras_model_builder import KerasModelBuilder
 
@@ -50,7 +50,7 @@
     from model_compression_toolkit.constants import TENSORFLOW
     from model_compression_toolkit.core.common.framework_info import FrameworkInfo
     from model_compression_toolkit.qat.common.qat_config import is_qat_applicable
-    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
     from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
     from model_compression_toolkit.qat.keras.quantizer.quantization_builder import quantization_builder, \
     get_activation_quantizer_holder
@@ -58,7 +58,7 @@
     from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
         AttachTpcToKeras
 
-    DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
+    DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
 
 
     def qat_wrapper(n: common.BaseNode,
diff --git a/model_compression_toolkit/qat/pytorch/quantization_facade.py b/model_compression_toolkit/qat/pytorch/quantization_facade.py
index 9cbce0609..1ea101158 100644
--- a/model_compression_toolkit/qat/pytorch/quantization_facade.py
+++ b/model_compression_toolkit/qat/pytorch/quantization_facade.py
@@ -40,7 +40,7 @@
     from torch.nn import Module
     from mct_quantizers import PytorchActivationQuantizationHolder
     from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
-    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
     from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
     from model_compression_toolkit.qat.common.qat_config import is_qat_applicable
     from model_compression_toolkit.core.pytorch.back2framework.pytorch_model_builder import PyTorchModelBuilder
@@ -50,7 +50,7 @@
     from model_compression_toolkit.qat.pytorch.quantizer.quantization_builder import get_activation_quantizer_holder
     from model_compression_toolkit.qat.pytorch.quantizer.quantization_builder import quantization_builder
 
-    DEFAULT_PYTORCH_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
+    DEFAULT_PYTORCH_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TPC)
 
 
     def qat_wrapper(n: common.BaseNode,
diff --git a/model_compression_toolkit/target_platform_capabilities/README.md b/model_compression_toolkit/target_platform_capabilities/README.md
index 6689cb148..fc8d973b7 100644
--- a/model_compression_toolkit/target_platform_capabilities/README.md
+++ b/model_compression_toolkit/target_platform_capabilities/README.md
@@ -21,9 +21,9 @@ Currently, MCT contains three target-platform models
 The default target-platform model is [IMX500](https://developer.sony.com/develop/imx500/), quantizes activations using 8 bits with power-of-two thresholds for 
 activations and symmetric threshold for weights.
 For mixed-precision quantization it uses either 2, 4, or 8 bits for quantizing the operators.
-One may view the full default target-platform model and its parameters [here](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py).
+One may view the full default target-platform model and its parameters [here](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc.py).
 
-[TFLite](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py) and [QNNPACK](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py) models were created similarly and were used to create two TPCs: One for Keras TPC and one for PyTorch TPC (for each model, this 8 in total).
+[TFLite](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py) and [QNNPACK](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py) models were created similarly and were used to create two TPCs: one for Keras TPC and one for PyTorch TPC (for each model, thus 8 in total).
 
 ## Usage
 
diff --git a/model_compression_toolkit/target_platform_capabilities/constants.py b/model_compression_toolkit/target_platform_capabilities/constants.py
index 83a2d6073..44fc1a5b1 100644
--- a/model_compression_toolkit/target_platform_capabilities/constants.py
+++ b/model_compression_toolkit/target_platform_capabilities/constants.py
@@ -21,10 +21,10 @@
 
 
 # Supported TP models names:
-DEFAULT_TP_MODEL = 'default'
-IMX500_TP_MODEL = 'imx500'
-TFLITE_TP_MODEL = 'tflite'
-QNNPACK_TP_MODEL = 'qnnpack'
+DEFAULT_TPC = 'default'
+IMX500_TPC = 'imx500'
+TFLITE_TPC = 'tflite'
+QNNPACK_TPC = 'qnnpack'
 
 # TP Attributes
 KERNEL_ATTR = "kernel_attr"
diff --git a/model_compression_toolkit/target_platform_capabilities/schema/schema_functions.py b/model_compression_toolkit/target_platform_capabilities/schema/schema_functions.py
index c512f9a47..ebb7d0620 100644
--- a/model_compression_toolkit/target_platform_capabilities/schema/schema_functions.py
+++ b/model_compression_toolkit/target_platform_capabilities/schema/schema_functions.py
@@ -32,31 +32,31 @@ def max_input_activation_n_bits(op_quantization_config: OpQuantizationConfig) ->
     return max(op_quantization_config.supported_input_activation_n_bits)
 
 
-def get_config_options_by_operators_set(tp_model: TargetPlatformCapabilities,
+def get_config_options_by_operators_set(tpc: TargetPlatformCapabilities,
                                         operators_set_name: str) -> QuantizationConfigOptions:
     """
     Get the QuantizationConfigOptions of an OperatorsSet by its name.
 
     Args:
-        tp_model (TargetPlatformCapabilities): The target platform model containing the operator sets and their configurations.
+        tpc (TargetPlatformCapabilities): The target platform model containing the operator sets and their configurations.
         operators_set_name (str): The name of the OperatorsSet whose quantization configuration options are to be retrieved.
 
     Returns:
         QuantizationConfigOptions: The quantization configuration options associated with the specified OperatorsSet,
         or the default quantization configuration options if the OperatorsSet is not found.
     """
-    for op_set in tp_model.operator_set:
+    for op_set in tpc.operator_set:
         if operators_set_name == op_set.name:
             return op_set.qc_options
-    return tp_model.default_qco
+    return tpc.default_qco
 
 
-def get_default_op_quantization_config(tp_model: TargetPlatformCapabilities) -> OpQuantizationConfig:
+def get_default_op_quantization_config(tpc: TargetPlatformCapabilities) -> OpQuantizationConfig:
     """
     Get the default OpQuantizationConfig of the TargetPlatformCapabilities.
 
     Args:
-        tp_model (TargetPlatformCapabilities): The target platform model containing the default quantization configuration.
+        tpc (TargetPlatformCapabilities): The target platform model containing the default quantization configuration.
 
     Returns:
         OpQuantizationConfig: The default quantization configuration.
@@ -64,32 +64,32 @@ def get_default_op_quantization_config(tp_model: TargetPlatformCapabilities) ->
     Raises:
         AssertionError: If the default quantization configuration list contains more than one configuration option.
     """
-    assert len(tp_model.default_qco.quantization_configurations) == 1, \
+    assert len(tpc.default_qco.quantization_configurations) == 1, \
         f"Default quantization configuration options must contain only one option, " \
-        f"but found {len(tp_model.default_qco.quantization_configurations)} configurations." # pragma: no cover
-    return tp_model.default_qco.quantization_configurations[0]
+        f"but found {len(tpc.default_qco.quantization_configurations)} configurations." # pragma: no cover
+    return tpc.default_qco.quantization_configurations[0]
 
 
-def is_opset_in_model(tp_model: TargetPlatformCapabilities, opset_name: str) -> bool:
+def is_opset_in_model(tpc: TargetPlatformCapabilities, opset_name: str) -> bool:
     """
     Check whether an OperatorsSet is defined in the model.
 
     Args:
-        tp_model (TargetPlatformCapabilities): The target platform model containing the list of operator sets.
+        tpc (TargetPlatformCapabilities): The target platform model containing the list of operator sets.
         opset_name (str): The name of the OperatorsSet to check for existence.
 
     Returns:
         bool: True if an OperatorsSet with the given name exists in the target platform model,
               otherwise False.
     """
-    return tp_model.operator_set is not None and opset_name in [x.name for x in tp_model.operator_set]
+    return tpc.operator_set is not None and opset_name in [x.name for x in tpc.operator_set]
 
-def get_opset_by_name(tp_model: TargetPlatformCapabilities, opset_name: str) -> Optional[OperatorsSetBase]:
+def get_opset_by_name(tpc: TargetPlatformCapabilities, opset_name: str) -> Optional[OperatorsSetBase]:
     """
     Get an OperatorsSet object from the model by its name.
 
     Args:
-        tp_model (TargetPlatformCapabilities): The target platform model containing the list of operator sets.
+        tpc (TargetPlatformCapabilities): The target platform model containing the list of operator sets.
         opset_name (str): The name of the OperatorsSet to be retrieved.
 
     Returns:
@@ -99,7 +99,7 @@ def get_opset_by_name(tp_model: TargetPlatformCapabilities, opset_name: str) ->
     Raises:
         A critical log message if multiple operator sets with the same name are found.
     """
-    opset_list = [x for x in tp_model.operator_set if x.name == opset_name]
+    opset_list = [x for x in tpc.operator_set if x.name == opset_name]
     if len(opset_list) > 1:
         Logger.critical(f"Found more than one OperatorsSet in TargetPlatformCapabilities with the name {opset_name}.") # pragma: no cover
     return opset_list[0] if opset_list else None
diff --git a/model_compression_toolkit/target_platform_capabilities/schema/v1.py b/model_compression_toolkit/target_platform_capabilities/schema/v1.py
index c57f1fd7e..fbbf5a2fb 100644
--- a/model_compression_toolkit/target_platform_capabilities/schema/v1.py
+++ b/model_compression_toolkit/target_platform_capabilities/schema/v1.py
@@ -644,7 +644,7 @@ class TargetPlatformCapabilities(BaseModel):
     tpc_patch_version: Optional[int]
     tpc_platform_type: Optional[str]
     add_metadata: bool = True
-    name: Optional[str] = "default_tp_model"
+    name: Optional[str] = "default_tpc"
     is_simd_padding: bool = False
 
     SCHEMA_VERSION: int = 1
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/framework_quantization_capabilities.py b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/framework_quantization_capabilities.py
index 8b1b4b9aa..3b0d8e417 100644
--- a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/framework_quantization_capabilities.py
+++ b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/framework_quantization_capabilities.py
@@ -36,23 +36,23 @@ class FrameworkQuantizationCapabilities(ImmutableClass):
     Attach framework information to a modeled hardware.
     """
     def __init__(self,
-                 tp_model: TargetPlatformCapabilities,
+                 tpc: TargetPlatformCapabilities,
                  name: str = "base"):
         """
 
         Args:
-            tp_model (TargetPlatformCapabilities): Modeled hardware to attach framework information to.
+            tpc (TargetPlatformCapabilities): Modeled hardware to attach framework information to.
             name (str): Name of the FrameworkQuantizationCapabilities.
         """
 
         super().__init__()
         self.name = name
-        assert isinstance(tp_model, TargetPlatformCapabilities), f'Target platform model that was passed to FrameworkQuantizationCapabilities must be of type TargetPlatformCapabilities, but has type of {type(tp_model)}'
-        self.tp_model = tp_model
+        assert isinstance(tpc, TargetPlatformCapabilities), f'Target platform model that was passed to FrameworkQuantizationCapabilities must be of type TargetPlatformCapabilities, but has type of {type(tpc)}'
+        self.tpc = tpc
         self.op_sets_to_layers = OperationsToLayers() # Init an empty OperationsToLayers
         self.layer2qco, self.filterlayer2qco = {}, {} # Init empty mappings from layers/LayerFilterParams to QC options
         # Track the unused opsets for warning purposes.
-        self.__tp_model_opsets_not_used = [s.name for s in tp_model.operator_set]
+        self.__tpc_opsets_not_used = [s.name for s in tpc.operator_set]
         self.remove_fusing_names_from_not_used_list()
 
     def get_layers_by_opset_name(self, opset_name: str) -> List[Any]:
@@ -66,7 +66,7 @@ def get_layers_by_opset_name(self, opset_name: str) -> List[Any]:
         Returns:
             List of layers/LayerFilterParams that are attached to the opset name.
         """
-        opset = get_opset_by_name(self.tp_model, opset_name)
+        opset = get_opset_by_name(self.tpc, opset_name)
         if opset is None:
             Logger.warning(f'{opset_name} was not found in FrameworkQuantizationCapabilities.')
             return None
@@ -100,9 +100,9 @@ def get_fusing_patterns(self) -> List[List[Any]]:
 
         """
         res = []
-        if self.tp_model.fusing_patterns is None:
+        if self.tpc.fusing_patterns is None:
             return res
-        for p in self.tp_model.fusing_patterns:
+        for p in self.tpc.fusing_patterns:
             ops = [self.get_layers_by_opset(x) for x in p.operator_groups]
             res.extend(itertools.product(*ops))
         return [list(x) for x in res]
@@ -115,10 +115,10 @@ def get_info(self) -> Dict[str, Any]:
 
         """
         return {"Target Platform Capabilities": self.name,
-                "Minor version": self.tp_model.tpc_minor_version,
-                "Patch version": self.tp_model.tpc_patch_version,
-                "Platform type": self.tp_model.tpc_platform_type,
-                "Target Platform Model": self.tp_model.get_info(),
+                "Minor version": self.tpc.tpc_minor_version,
+                "Patch version": self.tpc.tpc_patch_version,
+                "Platform type": self.tpc.tpc_platform_type,
+                "Target Platform Model": self.tpc.get_info(),
                 "Operations to layers": {op2layer.name:[l.__name__ for l in op2layer.layers] for op2layer in self.op_sets_to_layers.op_sets_to_layers}}
 
     def show(self):
@@ -168,7 +168,7 @@ def get_default_op_qc(self) -> OpQuantizationConfig:
         to the FrameworkQuantizationCapabilities.
 
         """
-        return get_default_op_quantization_config(self.tp_model)
+        return get_default_op_quantization_config(self.tpc)
 
 
     def _get_config_options_mapping(self) -> Tuple[Dict[Any, QuantizationConfigOptions],
@@ -184,9 +184,9 @@ def _get_config_options_mapping(self) -> Tuple[Dict[Any, QuantizationConfigOptio
         filterlayer2qco = {}
         for op2layers in self.op_sets_to_layers.op_sets_to_layers:
             for l in op2layers.layers:
-                qco = get_config_options_by_operators_set(self.tp_model, op2layers.name)
+                qco = get_config_options_by_operators_set(self.tpc, op2layers.name)
                 if qco is None:
-                    qco = self.tp_model.default_qco
+                    qco = self.tpc.default_qco
 
                 # here, we need to take care of mapping a general attribute name into a framework and
                 # layer type specific attribute name.
@@ -208,8 +208,8 @@ def remove_fusing_names_from_not_used_list(self):
         Remove OperatorSets names from the list of the unused sets (so a warning
         will not be displayed).
         """
-        if self.tp_model.fusing_patterns is not None:
-            for f in self.tp_model.fusing_patterns:
+        if self.tpc.fusing_patterns is not None:
+            for f in self.tpc.fusing_patterns:
                 for s in f.operator_groups:
                     self.remove_opset_from_not_used_list(s.name)
 
@@ -222,8 +222,8 @@ def remove_opset_from_not_used_list(self,
             opset_to_remove: OperatorsSet name to remove.
 
         """
-        if opset_to_remove in self.__tp_model_opsets_not_used:
-            self.__tp_model_opsets_not_used.remove(opset_to_remove)
+        if opset_to_remove in self.__tpc_opsets_not_used:
+            self.__tpc_opsets_not_used.remove(opset_to_remove)
 
     @property
     def is_simd_padding(self) -> bool:
@@ -232,4 +232,4 @@ def is_simd_padding(self) -> bool:
         Returns: Check if the TP model defines that padding due to SIMD constrains occurs.
 
         """
-        return self.tp_model.is_simd_padding
+        return self.tpc.is_simd_padding
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py
index 9c742306c..be1f57190 100644
--- a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py
+++ b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py
@@ -144,7 +144,7 @@ def validate_op_sets(self):
 
             # Assert that a layer does not appear in more than a single OperatorsSet in the TargetPlatformCapabilities.
             for layer in ops2layers.layers:
-                qco_by_opset_name = get_config_options_by_operators_set(_current_tpc.get().tp_model, ops2layers.name)
+                qco_by_opset_name = get_config_options_by_operators_set(_current_tpc.get().tpc, ops2layers.name)
                 if layer in existing_layers:
                     Logger.critical(f'Found layer {layer.__name__} in more than one '
                                     f'OperatorsSet')  # pragma: no cover
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_io_handler.py b/model_compression_toolkit/target_platform_capabilities/tpc_io_handler.py
index 9d842da1e..0a844d035 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_io_handler.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_io_handler.py
@@ -20,13 +20,13 @@
 import json
 
 
-def load_target_platform_model(tp_model_or_path: Union[TargetPlatformCapabilities, str]) -> TargetPlatformCapabilities:
+def load_target_platform_model(tpc_obj_or_path: Union[TargetPlatformCapabilities, str]) -> TargetPlatformCapabilities:
     """
-        Parses the tp_model input, which can be either a TargetPlatformCapabilities object
+        Parses the tpc_obj_or_path input, which can be either a TargetPlatformCapabilities object
         or a string path to a JSON file.
 
         Parameters:
-            tp_model_or_path (Union[TargetPlatformModel, str]): Input target platform model or path to .JSON file.
+            tpc_obj_or_path (Union[TargetPlatformCapabilities, str]): Input TargetPlatformCapabilities object or path to a .json file.
 
         Returns:
             TargetPlatformCapabilities: The parsed TargetPlatformCapabilities.
@@ -36,14 +36,14 @@ def load_target_platform_model(tp_model_or_path: Union[TargetPlatformCapabilitie
             ValueError: If the JSON content is invalid or cannot initialize the TargetPlatformCapabilities.
             TypeError: If the input is neither a TargetPlatformCapabilities nor a valid JSON file path.
         """
-    if isinstance(tp_model_or_path, TargetPlatformCapabilities):
-        return tp_model_or_path
+    if isinstance(tpc_obj_or_path, TargetPlatformCapabilities):
+        return tpc_obj_or_path
 
-    if isinstance(tp_model_or_path, str):
-        path = Path(tp_model_or_path)
+    if isinstance(tpc_obj_or_path, str):
+        path = Path(tpc_obj_or_path)
 
         if not path.exists() or not path.is_file():
-            raise FileNotFoundError(f"The path '{tp_model_or_path}' is not a valid file.")
+            raise FileNotFoundError(f"The path '{tpc_obj_or_path}' is not a valid file.")
         # Verify that the file has a .json extension
         if path.suffix.lower() != '.json':
             raise ValueError(f"The file '{path}' does not have a '.json' extension.")
@@ -51,18 +51,18 @@ def load_target_platform_model(tp_model_or_path: Union[TargetPlatformCapabilitie
             with path.open('r', encoding='utf-8') as file:
                 data = file.read()
         except OSError as e:
-            raise ValueError(f"Error reading the file '{tp_model_or_path}': {e.strerror}.") from e
+            raise ValueError(f"Error reading the file '{tpc_obj_or_path}': {e.strerror}.") from e
 
         try:
             return TargetPlatformCapabilities.parse_raw(data)
         except ValueError as e:
-            raise ValueError(f"Invalid JSON for loading TargetPlatformCapabilities in '{tp_model_or_path}': {e}.") from e
+            raise ValueError(f"Invalid JSON for loading TargetPlatformCapabilities in '{tpc_obj_or_path}': {e}.") from e
         except Exception as e:
             raise ValueError(f"Unexpected error while initializing TargetPlatformCapabilities: {e}.") from e
 
     raise TypeError(
-        f"tp_model_or_path must be either a TargetPlatformCapabilities instance or a string path to a JSON file, "
-        f"but received type '{type(tp_model_or_path).__name__}'."
+        f"tpc_obj_or_path must be either a TargetPlatformCapabilities instance or a string path to a JSON file, "
+        f"but received type '{type(tpc_obj_or_path).__name__}'."
     )
 
 
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py
index f71ab0fe2..f51cd7169 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py
@@ -13,13 +13,13 @@
 # limitations under the License.
 # ==============================================================================
 from model_compression_toolkit.constants import TENSORFLOW, PYTORCH
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL, IMX500_TP_MODEL, \
-    TFLITE_TP_MODEL, QNNPACK_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC, IMX500_TPC, \
+    TFLITE_TPC, QNNPACK_TPC
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
 
-from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v1.tp_model import get_tp_model as get_tp_model_imx500_v1
-from model_compression_toolkit.target_platform_capabilities.tpc_models.tflite_tpc.v1.tp_model import get_tp_model as get_tp_model_tflite_v1
-from model_compression_toolkit.target_platform_capabilities.tpc_models.qnnpack_tpc.v1.tp_model import get_tp_model as get_tp_model_qnnpack_v1
+from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v1.tpc import get_tpc as get_tpc_imx500_v1
+from model_compression_toolkit.target_platform_capabilities.tpc_models.tflite_tpc.v1.tpc import get_tpc as get_tpc_tflite_v1
+from model_compression_toolkit.target_platform_capabilities.tpc_models.qnnpack_tpc.v1.tpc import get_tpc as get_tpc_qnnpack_v1
 
 
 # TODO: These methods need to be replaced once modifying the TPC API.
@@ -42,33 +42,33 @@ def get_target_platform_capabilities(fw_name: str,
 
     assert fw_name in [TENSORFLOW, PYTORCH], f"Unsupported framework {fw_name}."
 
-    if target_platform_name == DEFAULT_TP_MODEL:
-        return get_tp_model_imx500_v1()
+    if target_platform_name == DEFAULT_TPC:
+        return get_tpc_imx500_v1()
 
     assert target_platform_version == 'v1' or target_platform_version is None, \
         "The usage of get_target_platform_capabilities API is supported only with the default TPC ('v1')."
 
-    if target_platform_name == IMX500_TP_MODEL:
-        return get_tp_model_imx500_v1()
-    elif target_platform_name == TFLITE_TP_MODEL:
-        return get_tp_model_tflite_v1()
-    elif target_platform_name == QNNPACK_TP_MODEL:
-        return get_tp_model_qnnpack_v1()
+    if target_platform_name == IMX500_TPC:
+        return get_tpc_imx500_v1()
+    elif target_platform_name == TFLITE_TPC:
+        return get_tpc_tflite_v1()
+    elif target_platform_name == QNNPACK_TPC:
+        return get_tpc_qnnpack_v1()
 
     raise ValueError(f"Unsupported target platform name {target_platform_name}.")
 
 
-def get_tpc_model(name: str, tp_model: TargetPlatformCapabilities):
+def get_tpc_model(name: str, tpc: TargetPlatformCapabilities):
     """
     This is a utility method that just returns the TargetPlatformCapabilities that it receives, to support existing TPC API.
 
     Args:
         name: the name of the TargetPlatformCapabilities (not used in this function).
-        tp_model: a TargetPlatformCapabilities to return.
+        tpc: a TargetPlatformCapabilities to return.
 
     Returns:
         The given TargetPlatformCapabilities object.
 
     """
 
-    return tp_model
+    return tpc
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py
index 797491b47..4692f56da 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/latest/__init__.py
@@ -13,13 +13,13 @@
 # limitations under the License.
 # ==============================================================================
 from model_compression_toolkit.verify_packages import FOUND_TORCH, FOUND_TF
-from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v1.tp_model import get_tp_model, generate_tp_model, \
+from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v1.tpc import get_tpc, generate_tpc, \
     get_op_quantization_configs
 if FOUND_TF:
-    from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v1.tp_model import get_tp_model as get_keras_tpc_latest
+    from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v1.tpc import get_tpc as get_keras_tpc_latest
     from model_compression_toolkit.target_platform_capabilities.tpc_models.get_target_platform_capabilities import \
         get_tpc_model as generate_keras_tpc
 if FOUND_TORCH:
-    from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v1.tp_model import get_tp_model as get_pytorch_tpc_latest
+    from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v1.tpc import get_tpc as get_pytorch_tpc_latest
     from model_compression_toolkit.target_platform_capabilities.tpc_models.get_target_platform_capabilities import \
         get_tpc_model as generate_pytorch_tpc
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc.py
similarity index 95%
rename from model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py
rename to model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc.py
index 8aad40644..d7e489b54 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tp_model.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc.py
@@ -18,29 +18,29 @@
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
-    IMX500_TP_MODEL
+    IMX500_TPC
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
 tp = mct.target_platform
 
 
-def get_tp_model() -> TargetPlatformCapabilities:
+def get_tpc() -> TargetPlatformCapabilities:
     """
     A method that generates a default target platform model, with base 8-bit quantization configuration and 8, 4, 2
     bits configuration list for mixed-precision quantization.
     NOTE: in order to generate a target platform model with different configurations but with the same Operators Sets
     (for tests, experiments, etc.), use this method implementation as a test-case, i.e., override the
-    'get_op_quantization_configs' method and use its output to call 'generate_tp_model' with your configurations.
+    'get_op_quantization_configs' method and use its output to call 'generate_tpc' with your configurations.
 
     Returns: A TargetPlatformCapabilities object.
 
     """
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
-    return generate_tp_model(default_config=default_config,
-                             base_config=base_config,
-                             mixed_precision_cfg_list=mixed_precision_cfg_list,
-                             name='imx500_tp_model')
+    return generate_tpc(default_config=default_config,
+                        base_config=base_config,
+                        mixed_precision_cfg_list=mixed_precision_cfg_list,
+                        name='imx500_tpc')
 
 
 def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantizationConfig], OpQuantizationConfig]:
@@ -131,10 +131,10 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     return linear_eight_bits, mixed_precision_cfg_list, eight_bits_default
 
 
-def generate_tp_model(default_config: OpQuantizationConfig,
-                      base_config: OpQuantizationConfig,
-                      mixed_precision_cfg_list: List[OpQuantizationConfig],
-                      name: str) -> TargetPlatformCapabilities:
+def generate_tpc(default_config: OpQuantizationConfig,
+                 base_config: OpQuantizationConfig,
+                 mixed_precision_cfg_list: List[OpQuantizationConfig],
+                 name: str) -> TargetPlatformCapabilities:
     """
     Generates TargetPlatformCapabilities with default defined Operators Sets, based on the given base configuration and
     mixed-precision configurations options list.
@@ -241,7 +241,7 @@ def generate_tp_model(default_config: OpQuantizationConfig,
         default_qco=default_configuration_options,
         tpc_minor_version=1,
         tpc_patch_version=0,
-        tpc_platform_type=IMX500_TP_MODEL,
+        tpc_platform_type=IMX500_TPC,
         operator_set=tuple(operator_set),
         fusing_patterns=tuple(fusing_patterns),
         name=name,
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py
index a35f93d7e..1e77471a4 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/latest/__init__.py
@@ -13,14 +13,14 @@
 # limitations under the License.
 # ==============================================================================
 from model_compression_toolkit.verify_packages import FOUND_TORCH, FOUND_TF
-from model_compression_toolkit.target_platform_capabilities.tpc_models.qnnpack_tpc.v1.tp_model import get_tp_model, generate_tp_model, get_op_quantization_configs
+from model_compression_toolkit.target_platform_capabilities.tpc_models.qnnpack_tpc.v1.tpc import get_tpc, generate_tpc, get_op_quantization_configs
 if FOUND_TF:
-    from model_compression_toolkit.target_platform_capabilities.tpc_models.qnnpack_tpc.v1.tp_model import get_tp_model as \
+    from model_compression_toolkit.target_platform_capabilities.tpc_models.qnnpack_tpc.v1.tpc import get_tpc as \
         get_keras_tpc_latest
     from model_compression_toolkit.target_platform_capabilities.tpc_models.get_target_platform_capabilities import \
         get_tpc_model as generate_keras_tpc, get_tpc_model as generate_keras_tpc
 if FOUND_TORCH:
-    from model_compression_toolkit.target_platform_capabilities.tpc_models.qnnpack_tpc.v1.tp_model import get_tp_model as \
+    from model_compression_toolkit.target_platform_capabilities.tpc_models.qnnpack_tpc.v1.tpc import get_tpc as \
         get_pytorch_tpc_latest
     from model_compression_toolkit.target_platform_capabilities.tpc_models.get_target_platform_capabilities import \
         get_tpc_model as generate_pytorch_tpc, get_tpc_model as generate_pytorch_tpc
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py
similarity index 92%
rename from model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py
rename to model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py
index f2d5572dd..e75d89970 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tp_model.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py
@@ -17,7 +17,7 @@
 import model_compression_toolkit as mct
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
-from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, QNNPACK_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, QNNPACK_TPC
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
@@ -25,22 +25,22 @@
 tp = mct.target_platform
 
 
-def get_tp_model() -> TargetPlatformCapabilities:
+def get_tpc() -> TargetPlatformCapabilities:
     """
     A method that generates a default target platform model, with base 8-bit quantization configuration and 8, 4, 2
     bits configuration list for mixed-precision quantization.
     NOTE: in order to generate a target platform model with different configurations but with the same Operators Sets
     (for tests, experiments, etc.), use this method implementation as a test-case, i.e., override the
-    'get_op_quantization_configs' method and use its output to call 'generate_tp_model' with your configurations.
+    'get_op_quantization_configs' method and use its output to call 'generate_tpc' with your configurations.
 
     Returns: A TargetPlatformCapabilities object.
 
     """
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
-    return generate_tp_model(default_config=default_config,
-                             base_config=base_config,
-                             mixed_precision_cfg_list=mixed_precision_cfg_list,
-                             name='qnnpack_tp_model')
+    return generate_tpc(default_config=default_config,
+                        base_config=base_config,
+                        mixed_precision_cfg_list=mixed_precision_cfg_list,
+                        name='qnnpack_tpc')
 
 
 def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantizationConfig], OpQuantizationConfig]:
@@ -117,10 +117,10 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     return linear_eight_bits, mixed_precision_cfg_list, eight_bits_default
 
 
-def generate_tp_model(default_config: OpQuantizationConfig,
-                      base_config: OpQuantizationConfig,
-                      mixed_precision_cfg_list: List[OpQuantizationConfig],
-                      name: str) -> TargetPlatformCapabilities:
+def generate_tpc(default_config: OpQuantizationConfig,
+                 base_config: OpQuantizationConfig,
+                 mixed_precision_cfg_list: List[OpQuantizationConfig],
+                 name: str) -> TargetPlatformCapabilities:
     """
     Generates TargetPlatformCapabilities with default defined Operators Sets, based on the given base configuration and
     mixed-precision configurations options list.
@@ -178,7 +178,7 @@ def generate_tp_model(default_config: OpQuantizationConfig,
         default_qco=default_configuration_options,
         tpc_minor_version=1,
         tpc_patch_version=0,
-        tpc_platform_type=QNNPACK_TP_MODEL,
+        tpc_platform_type=QNNPACK_TPC,
         operator_set=tuple(operator_set),
         fusing_patterns=tuple(fusing_patterns),
         add_metadata=False,
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py
index 6525c6aa6..6ec57c6e9 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/latest/__init__.py
@@ -13,13 +13,13 @@
 # limitations under the License.
 # ==============================================================================
 from model_compression_toolkit.verify_packages import FOUND_TORCH, FOUND_TF
-from model_compression_toolkit.target_platform_capabilities.tpc_models.tflite_tpc.v1.tp_model import get_tp_model, generate_tp_model, get_op_quantization_configs
+from model_compression_toolkit.target_platform_capabilities.tpc_models.tflite_tpc.v1.tpc import get_tpc, generate_tpc, get_op_quantization_configs
 if FOUND_TF:
-    from model_compression_toolkit.target_platform_capabilities.tpc_models.tflite_tpc.v1.tp_model import get_keras_tpc as get_keras_tpc_latest
+    from model_compression_toolkit.target_platform_capabilities.tpc_models.tflite_tpc.v1.tpc import get_keras_tpc as get_keras_tpc_latest
     from model_compression_toolkit.target_platform_capabilities.tpc_models.get_target_platform_capabilities import \
         get_tpc_model as generate_keras_tpc
 if FOUND_TORCH:
-    from model_compression_toolkit.target_platform_capabilities.tpc_models.tflite_tpc.v1.tp_model import \
-        get_tp_model as get_pytorch_tpc_latest
+    from model_compression_toolkit.target_platform_capabilities.tpc_models.tflite_tpc.v1.tpc import \
+        get_tpc as get_pytorch_tpc_latest
     from model_compression_toolkit.target_platform_capabilities.tpc_models.get_target_platform_capabilities import \
         get_tpc_model as generate_pytorch_tpc
\ No newline at end of file
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py
similarity index 94%
rename from model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py
rename to model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py
index d3b47ce2d..7a8a1bc4e 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tp_model.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py
@@ -17,29 +17,29 @@
 import model_compression_toolkit as mct
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
-from model_compression_toolkit.target_platform_capabilities.constants import BIAS_ATTR, KERNEL_ATTR, TFLITE_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import BIAS_ATTR, KERNEL_ATTR, TFLITE_TPC
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
 tp = mct.target_platform
 
 
-def get_tp_model() -> TargetPlatformCapabilities:
+def get_tpc() -> TargetPlatformCapabilities:
     """
     A method that generates a default target platform model, with base 8-bit quantization configuration and 8, 4, 2
     bits configuration list for mixed-precision quantization.
     NOTE: in order to generate a target platform model with different configurations but with the same Operators Sets
     (for tests, experiments, etc.), use this method implementation as a test-case, i.e., override the
-    'get_op_quantization_configs' method and use its output to call 'generate_tp_model' with your configurations.
+    'get_op_quantization_configs' method and use its output to call 'generate_tpc' with your configurations.
 
     Returns: A TargetPlatformCapabilities object.
 
     """
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
-    return generate_tp_model(default_config=default_config,
-                             base_config=base_config,
-                             mixed_precision_cfg_list=mixed_precision_cfg_list,
-                             name='tflite_tp_model')
+    return generate_tpc(default_config=default_config,
+                        base_config=base_config,
+                        mixed_precision_cfg_list=mixed_precision_cfg_list,
+                        name='tflite_tpc')
 
 
 def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantizationConfig], OpQuantizationConfig]:
@@ -114,10 +114,10 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     return linear_eight_bits, mixed_precision_cfg_list, eight_bits_default
 
 
-def generate_tp_model(default_config: OpQuantizationConfig,
-                      base_config: OpQuantizationConfig,
-                      mixed_precision_cfg_list: List[OpQuantizationConfig],
-                      name: str) -> TargetPlatformCapabilities:
+def generate_tpc(default_config: OpQuantizationConfig,
+                 base_config: OpQuantizationConfig,
+                 mixed_precision_cfg_list: List[OpQuantizationConfig],
+                 name: str) -> TargetPlatformCapabilities:
     """
     Generates TargetPlatformCapabilities with default defined Operators Sets, based on the given base configuration and
     mixed-precision configurations options list.
@@ -218,7 +218,7 @@ def generate_tp_model(default_config: OpQuantizationConfig,
         tpc_patch_version=0,
         operator_set=tuple(operator_set),
         fusing_patterns=tuple(fusing_patterns),
-        tpc_platform_type=TFLITE_TP_MODEL,
+        tpc_platform_type=TFLITE_TPC,
         add_metadata=False,
         name=name)
 
diff --git a/model_compression_toolkit/xquant/keras/keras_report_utils.py b/model_compression_toolkit/xquant/keras/keras_report_utils.py
index 73c249b6f..a83c7906e 100644
--- a/model_compression_toolkit/xquant/keras/keras_report_utils.py
+++ b/model_compression_toolkit/xquant/keras/keras_report_utils.py
@@ -26,7 +26,7 @@
 from model_compression_toolkit.xquant.keras.similarity_functions import KerasSimilarityFunctions
 from model_compression_toolkit.xquant.keras.tensorboard_utils import KerasTensorboardUtils
 from mct_quantizers.keras.metadata import get_metadata
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
 
@@ -44,7 +44,7 @@ def __init__(self, report_dir: str):
         fw_impl = KerasImplementation()
 
         # Set the default Target Platform Capabilities (TPC) for Keras.
-        default_tpc = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
+        default_tpc = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
         attach2pytorch = AttachTpcToKeras()
         framework_platform_capabilities = attach2pytorch.attach(default_tpc)
 
diff --git a/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py b/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py
index 454b83f10..3aa737462 100644
--- a/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py
+++ b/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py
@@ -15,7 +15,7 @@
 from model_compression_toolkit import get_target_platform_capabilities
 from model_compression_toolkit.constants import PYTORCH
 from model_compression_toolkit.core.pytorch.utils import get_working_device
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2pytorch import \
     AttachTpcToPytorch
 
@@ -42,7 +42,7 @@ def __init__(self, report_dir: str):
         fw_info = DEFAULT_PYTORCH_INFO
         fw_impl = PytorchImplementation()
         # Set the default Target Platform Capabilities (TPC) for PyTorch.
-        default_tpc = get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
+        default_tpc = get_target_platform_capabilities(PYTORCH, DEFAULT_TPC)
         attach2pytorch = AttachTpcToPytorch()
         framework_quantization_capabilities = attach2pytorch.attach(default_tpc)
 
diff --git a/tests/common_tests/helpers/generate_test_tp_model.py b/tests/common_tests/helpers/generate_test_tpc.py
similarity index 84%
rename from tests/common_tests/helpers/generate_test_tp_model.py
rename to tests/common_tests/helpers/generate_test_tpc.py
index aee875bf5..2b6276514 100644
--- a/tests/common_tests/helpers/generate_test_tp_model.py
+++ b/tests/common_tests/helpers/generate_test_tpc.py
@@ -22,7 +22,7 @@
     WEIGHTS_N_BITS
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import Signedness, OpQuantizationConfig, \
     QuantizationConfigOptions
-from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_op_quantization_configs, generate_tp_model
+from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_op_quantization_configs, generate_tpc
 import model_compression_toolkit as mct
 
 tp = mct.target_platform
@@ -32,7 +32,7 @@
 BIAS_CONFIG = 'bias_config'
 
 
-def generate_test_tp_model(edit_params_dict, name=""):
+def generate_test_tpc(edit_params_dict, name=""):
     # Add "supported_input_activation_n_bits" to match "activation_n_bits" if not defined.
     if ACTIVATION_N_BITS_ATTRIBUTE in edit_params_dict and SUPPORTED_INPUT_ACTIVATION_NBITS_ATTRIBUTE not in edit_params_dict:
         edit_params_dict[SUPPORTED_INPUT_ACTIVATION_NBITS_ATTRIBUTE] = (edit_params_dict[ACTIVATION_N_BITS_ATTRIBUTE],)
@@ -60,13 +60,13 @@ def generate_test_tp_model(edit_params_dict, name=""):
     # this method only used for non-mixed-precision tests
     op_cfg_list = [updated_config]
 
-    return generate_tp_model(default_config=updated_default_config,
-                             base_config=updated_config,
-                             mixed_precision_cfg_list=op_cfg_list,
-                             name=name)
+    return generate_tpc(default_config=updated_default_config,
+                        base_config=updated_config,
+                        mixed_precision_cfg_list=op_cfg_list,
+                        name=name)
 
 
-def generate_mixed_precision_test_tp_model(base_cfg, default_config, mp_bitwidth_candidates_list, name=""):
+def generate_mixed_precision_test_tpc(base_cfg, default_config, mp_bitwidth_candidates_list, name=""):
     mp_op_cfg_list = []
     for weights_n_bits, activation_n_bits in mp_bitwidth_candidates_list:
         candidate_cfg = base_cfg.clone_and_edit(attr_to_edit={KERNEL_ATTR: {WEIGHTS_N_BITS: weights_n_bits}},
@@ -78,10 +78,10 @@ def generate_mixed_precision_test_tp_model(base_cfg, default_config, mp_bitwidth
         else:
             mp_op_cfg_list.append(candidate_cfg)
 
-    return generate_tp_model(default_config=default_config,
-                             base_config=base_cfg,
-                             mixed_precision_cfg_list=mp_op_cfg_list,
-                             name=name)
+    return generate_tpc(default_config=default_config,
+                        base_config=base_cfg,
+                        mixed_precision_cfg_list=mp_op_cfg_list,
+                        name=name)
 
 
 def _op_config_quantize_activation(op_set, default_quantize_activation):
@@ -89,8 +89,8 @@ def _op_config_quantize_activation(op_set, default_quantize_activation):
             op_set.qc_options.base_config.enable_activation_quantization))
 
 
-def generate_tp_model_with_activation_mp(base_cfg, default_config, mp_bitwidth_candidates_list, custom_opsets=[],
-                                         name="activation_mp_model"):
+def generate_tpc_with_activation_mp(base_cfg, default_config, mp_bitwidth_candidates_list, custom_opsets=[],
+                                    name="activation_mp_model"):
     mp_op_cfg_list = []
     for weights_n_bits, activation_n_bits in mp_bitwidth_candidates_list:
 
@@ -106,37 +106,37 @@ def generate_tp_model_with_activation_mp(base_cfg, default_config, mp_bitwidth_c
         else:
             mp_op_cfg_list.append(candidate_cfg)
 
-    base_tp_model = generate_tp_model(default_config=default_config,
-                                      base_config=base_cfg,
-                                      mixed_precision_cfg_list=mp_op_cfg_list,
-                                      name=name)
+    base_tpc = generate_tpc(default_config=default_config,
+                            base_config=base_cfg,
+                            mixed_precision_cfg_list=mp_op_cfg_list,
+                            name=name)
 
     mixed_precision_configuration_options = schema.QuantizationConfigOptions(
         quantization_configurations=tuple(mp_op_cfg_list), base_config=base_cfg)
 
     # setting only operator that already quantizing activations to mixed precision activation
-    operator_sets_dict = {op_set.name: mixed_precision_configuration_options for op_set in base_tp_model.operator_set
-                          if _op_config_quantize_activation(op_set, base_tp_model.default_qco.base_config.enable_activation_quantization)}
+    operator_sets_dict = {op_set.name: mixed_precision_configuration_options for op_set in base_tpc.operator_set
+                          if _op_config_quantize_activation(op_set, base_tpc.default_qco.base_config.enable_activation_quantization)}
 
     operator_sets_dict["Input"] = mixed_precision_configuration_options
     for c_ops in custom_opsets:
         operator_sets_dict[c_ops] = mixed_precision_configuration_options
 
-    return generate_custom_test_tp_model(name=name,
-                                         base_cfg=base_cfg,
-                                         base_tp_model=base_tp_model,
-                                         operator_sets_dict=operator_sets_dict)
+    return generate_custom_test_tpc(name=name,
+                                    base_cfg=base_cfg,
+                                    base_tpc=base_tpc,
+                                    operator_sets_dict=operator_sets_dict)
 
 
-def generate_custom_test_tp_model(name: str,
-                                  base_cfg: OpQuantizationConfig,
-                                  base_tp_model: schema.TargetPlatformCapabilities,
-                                  operator_sets_dict: Dict[str, QuantizationConfigOptions] = None):
+def generate_custom_test_tpc(name: str,
+                             base_cfg: OpQuantizationConfig,
+                             base_tpc: schema.TargetPlatformCapabilities,
+                             operator_sets_dict: Dict[str, QuantizationConfigOptions] = None):
     default_configuration_options = schema.QuantizationConfigOptions(quantization_configurations=tuple([base_cfg]))
 
     operator_set, fusing_patterns = [], []
 
-    for op_set in base_tp_model.operator_set:
+    for op_set in base_tpc.operator_set:
         # Add existing OperatorSets from base TP model
         if operator_sets_dict is not None and operator_sets_dict.get(op_set.name) is not None:
             qc_options = operator_sets_dict[op_set.name]
@@ -145,16 +145,16 @@ def generate_custom_test_tp_model(name: str,
 
         operator_set.append(schema.OperatorsSet(name=op_set.name, qc_options=qc_options))
 
-    existing_op_sets_names = [op_set.name for op_set in base_tp_model.operator_set]
+    existing_op_sets_names = [op_set.name for op_set in base_tpc.operator_set]
     for op_set_name, op_set_qc_options in operator_sets_dict.items():
         # Add new OperatorSets from the given operator_sets_dict
         if op_set_name not in existing_op_sets_names:
             operator_set.append( schema.OperatorsSet(name=op_set_name, qc_options=op_set_qc_options))
 
-    for fusion in base_tp_model.fusing_patterns:
+    for fusion in base_tpc.fusing_patterns:
         fusing_patterns.append(schema.Fusing(operator_groups=fusion.operator_groups))
 
-    custom_tp_model = schema.TargetPlatformCapabilities(
+    custom_tpc = schema.TargetPlatformCapabilities(
         default_qco=default_configuration_options,
         tpc_minor_version=None,
         tpc_patch_version=None,
@@ -163,11 +163,11 @@ def generate_custom_test_tp_model(name: str,
         fusing_patterns=tuple(fusing_patterns),
         add_metadata=False,
         name=name)
-    return custom_tp_model
+    return custom_tpc
 
 
 def generate_test_fqc(name: str,
-                      tp_model: schema.TargetPlatformCapabilities,
+                      tpc: schema.TargetPlatformCapabilities,
                       base_fqc: tp.FrameworkQuantizationCapabilities,
                       op_sets_to_layer_add: Dict[str, List[Any]] = None,
                       op_sets_to_layer_drop: Dict[str, List[Any]] = None,
@@ -189,7 +189,7 @@ def generate_test_fqc(name: str,
         # Remove empty op sets
         merged_dict = {op_set_name: layers for op_set_name, layers in merged_dict.items() if len(layers) == 0}
 
-    fqc = tp.FrameworkQuantizationCapabilities(tp_model)
+    fqc = tp.FrameworkQuantizationCapabilities(tpc)
 
     with fqc:
         for op_set_name, layers in merged_dict.items():
diff --git a/tests/common_tests/helpers/prep_graph_for_func_test.py b/tests/common_tests/helpers/prep_graph_for_func_test.py
index fc0aaca1b..bbb5d76a7 100644
--- a/tests/common_tests/helpers/prep_graph_for_func_test.py
+++ b/tests/common_tests/helpers/prep_graph_for_func_test.py
@@ -24,7 +24,7 @@
 from model_compression_toolkit.core.graph_prep_runner import graph_preparation_runner
 from model_compression_toolkit.core.quantization_prep_runner import quantization_preparation_runner
 
-from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_tp_model, \
+from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_tpc, \
     get_op_quantization_configs
 
 import model_compression_toolkit as mct
@@ -46,7 +46,7 @@ def prepare_graph_with_configs(in_model,
 
     # To override the default TP in the test - pass a TPC generator function that includes a generation of the TP
     # and doesn't use the TP that is passed from outside.
-    _tp = generate_tp_model(default_config, base_config, op_cfg_list, "function_test")
+    _tp = generate_tpc(default_config, base_config, op_cfg_list, "function_test")
     tpc = get_tpc_func("function_test", _tp)
 
     fqc = attach2fw.attach(tpc, qc.custom_tpc_opset_to_layer)
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v1/tp_model.py b/tests/common_tests/helpers/tpcs_for_tests/v1/tpc.py
similarity index 94%
rename from tests/common_tests/helpers/tpcs_for_tests/v1/tp_model.py
rename to tests/common_tests/helpers/tpcs_for_tests/v1/tpc.py
index d026c4f0f..be841ab5f 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v1/tp_model.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v1/tpc.py
@@ -18,29 +18,29 @@
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
-    IMX500_TP_MODEL
+    IMX500_TPC
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
 tp = mct.target_platform
 
 
-def get_tp_model() -> TargetPlatformCapabilities:
+def get_tpc() -> TargetPlatformCapabilities:
     """
     A method that generates a default target platform model, with base 8-bit quantization configuration and 8, 4, 2
     bits configuration list for mixed-precision quantization.
     NOTE: in order to generate a target platform model with different configurations but with the same Operators Sets
     (for tests, experiments, etc.), use this method implementation as a test-case, i.e., override the
-    'get_op_quantization_configs' method and use its output to call 'generate_tp_model' with your configurations.
+    'get_op_quantization_configs' method and use its output to call 'generate_tpc' with your configurations.
 
     Returns: A TargetPlatformCapabilities object.
 
     """
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
-    return generate_tp_model(default_config=default_config,
-                             base_config=base_config,
-                             mixed_precision_cfg_list=mixed_precision_cfg_list,
-                             name='imx500_tp_model')
+    return generate_tpc(default_config=default_config,
+                        base_config=base_config,
+                        mixed_precision_cfg_list=mixed_precision_cfg_list,
+                        name='imx500_tpc')
 
 
 def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantizationConfig], OpQuantizationConfig]:
@@ -56,7 +56,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     # TODO: currently, we don't want to quantize any attribute but the kernel by default,
     #  to preserve the current behavior of MCT, so quantization is disabled for all other attributes.
     #  Other quantization parameters are set to what we eventually want to quantize by default
-    #  when we enable multi-attributes quantization - THIS NEED TO BE MODIFIED IN ALL TP MODELS!
+    #  when we enable multi-attributes quantization - THIS NEEDS TO BE MODIFIED IN ALL TPCS!
 
     # define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
@@ -131,18 +131,18 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     return linear_eight_bits, mixed_precision_cfg_list, eight_bits_default
 
 
-def generate_tp_model(default_config: OpQuantizationConfig,
-                      base_config: OpQuantizationConfig,
-                      mixed_precision_cfg_list: List[OpQuantizationConfig],
-                      name: str) -> TargetPlatformCapabilities:
+def generate_tpc(default_config: OpQuantizationConfig,
+                 base_config: OpQuantizationConfig,
+                 mixed_precision_cfg_list: List[OpQuantizationConfig],
+                 name: str) -> TargetPlatformCapabilities:
     """
     Generates TargetPlatformCapabilities with default defined Operators Sets, based on the given base configuration and
     mixed-precision configurations options list.
 
     Args
-        default_config: A default OpQuantizationConfig to set as the TP model default configuration.
+        default_config: A default OpQuantizationConfig to set as the TPC default configuration.
         base_config: An OpQuantizationConfig to set as the TargetPlatformCapabilities base configuration for mixed-precision purposes only.
-        mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TP model mixed-precision
+        mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TPC mixed-precision
             quantization configuration options.
         name: The name of the TargetPlatformCapabilities.
 
@@ -242,7 +242,7 @@ def generate_tp_model(default_config: OpQuantizationConfig,
         default_qco=default_configuration_options,
         tpc_minor_version=1,
         tpc_patch_version=0,
-        tpc_platform_type=IMX500_TP_MODEL,
+        tpc_platform_type=IMX500_TPC,
         operator_set=tuple(operator_set),
         fusing_patterns=tuple(fusing_patterns),
         name=name,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tp_model.py b/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tpc.py
similarity index 95%
rename from tests/common_tests/helpers/tpcs_for_tests/v1_lut/tp_model.py
rename to tests/common_tests/helpers/tpcs_for_tests/v1_lut/tpc.py
index 76a7a9045..b2f09c611 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tp_model.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tpc.py
@@ -18,7 +18,7 @@
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
-    WEIGHTS_QUANTIZATION_METHOD, IMX500_TP_MODEL
+    WEIGHTS_QUANTIZATION_METHOD, IMX500_TPC
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
@@ -26,22 +26,22 @@
 tp = mct.target_platform
 
 
-def get_tp_model() -> TargetPlatformCapabilities:
+def get_tpc() -> TargetPlatformCapabilities:
     """
     A method that generates a default target platform model, with base 8-bit quantization configuration and 8, 4, 2
     bits configuration list for mixed-precision quantization.
     NOTE: in order to generate a target platform model with different configurations but with the same Operators Sets
     (for tests, experiments, etc.), use this method implementation as a test-case, i.e., override the
-    'get_op_quantization_configs' method and use its output to call 'generate_tp_model' with your configurations.
+    'get_op_quantization_configs' method and use its output to call 'generate_tpc' with your configurations.
 
     Returns: A TargetPlatformCapabilities object.
 
     """
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
-    return generate_tp_model(default_config=default_config,
-                             base_config=base_config,
-                             mixed_precision_cfg_list=mixed_precision_cfg_list,
-                             name='imx500_lut_tp_model')
+    return generate_tpc(default_config=default_config,
+                        base_config=base_config,
+                        mixed_precision_cfg_list=mixed_precision_cfg_list,
+                        name='imx500_lut_tpc')
 
 
 def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantizationConfig], OpQuantizationConfig]:
@@ -129,18 +129,18 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     return linear_eight_bits, mixed_precision_cfg_list, eight_bits_default
 
 
-def generate_tp_model(default_config: OpQuantizationConfig,
-                      base_config: OpQuantizationConfig,
-                      mixed_precision_cfg_list: List[OpQuantizationConfig],
-                      name: str) -> TargetPlatformCapabilities:
+def generate_tpc(default_config: OpQuantizationConfig,
+                 base_config: OpQuantizationConfig,
+                 mixed_precision_cfg_list: List[OpQuantizationConfig],
+                 name: str) -> TargetPlatformCapabilities:
     """
     Generates TargetPlatformCapabilities with default defined Operators Sets, based on the given base configuration and
     mixed-precision configurations options list.
 
     Args
-        default_config: A default OpQuantizationConfig to set as the TP model default configuration.
+        default_config: A default OpQuantizationConfig to set as the TPC default configuration.
         base_config: An OpQuantizationConfig to set as the TargetPlatformCapabilities base configuration for mixed-precision purposes only.
-        mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TP model mixed-precision
+        mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TPC mixed-precision
             quantization configuration options.
         name: The name of the TargetPlatformCapabilities.
 
@@ -274,7 +274,7 @@ def generate_tp_model(default_config: OpQuantizationConfig,
         default_qco=default_configuration_options,
         tpc_minor_version=2,
         tpc_patch_version=0,
-        tpc_platform_type=IMX500_TP_MODEL,
+        tpc_platform_type=IMX500_TPC,
         operator_set=tuple(operator_set),
         fusing_patterns=tuple(fusing_patterns),
         add_metadata=True,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tp_model.py b/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tpc.py
similarity index 95%
rename from tests/common_tests/helpers/tpcs_for_tests/v1_pot/tp_model.py
rename to tests/common_tests/helpers/tpcs_for_tests/v1_pot/tpc.py
index a0e8bef02..c2a708cc7 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tp_model.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tpc.py
@@ -18,7 +18,7 @@
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
-    IMX500_TP_MODEL
+    IMX500_TPC
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
@@ -26,22 +26,22 @@
 tp = mct.target_platform
 
 
-def get_tp_model() -> TargetPlatformCapabilities:
+def get_tpc() -> TargetPlatformCapabilities:
     """
     A method that generates a default target platform model, with base 8-bit quantization configuration and 8, 4, 2
     bits configuration list for mixed-precision quantization.
     NOTE: in order to generate a target platform model with different configurations but with the same Operators Sets
     (for tests, experiments, etc.), use this method implementation as a test-case, i.e., override the
-    'get_op_quantization_configs' method and use its output to call 'generate_tp_model' with your configurations.
+    'get_op_quantization_configs' method and use its output to call 'generate_tpc' with your configurations.
 
     Returns: A TargetPlatformCapabilities object.
 
     """
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
-    return generate_tp_model(default_config=default_config,
-                             base_config=base_config,
-                             mixed_precision_cfg_list=mixed_precision_cfg_list,
-                             name='imx500_pot_tp_model')
+    return generate_tpc(default_config=default_config,
+                        base_config=base_config,
+                        mixed_precision_cfg_list=mixed_precision_cfg_list,
+                        name='imx500_pot_tpc')
 
 
 def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantizationConfig], OpQuantizationConfig]:
@@ -125,18 +125,18 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     return linear_eight_bits, mixed_precision_cfg_list, eight_bits_default
 
 
-def generate_tp_model(default_config: OpQuantizationConfig,
-                      base_config: OpQuantizationConfig,
-                      mixed_precision_cfg_list: List[OpQuantizationConfig],
-                      name: str) -> TargetPlatformCapabilities:
+def generate_tpc(default_config: OpQuantizationConfig,
+                 base_config: OpQuantizationConfig,
+                 mixed_precision_cfg_list: List[OpQuantizationConfig],
+                 name: str) -> TargetPlatformCapabilities:
     """
     Generates TargetPlatformCapabilities with default defined Operators Sets, based on the given base configuration and
     mixed-precision configurations options list.
 
     Args
-        default_config: A default OpQuantizationConfig to set as the TP model default configuration.
+        default_config: A default OpQuantizationConfig to set as the TPC default configuration.
         base_config: An OpQuantizationConfig to set as the TargetPlatformCapabilities base configuration for mixed-precision purposes only.
-        mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TP model mixed-precision
+        mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TPC mixed-precision
             quantization configuration options.
         name: The name of the TargetPlatformCapabilities.
 
@@ -270,7 +270,7 @@ def generate_tp_model(default_config: OpQuantizationConfig,
         default_qco=default_configuration_options,
         tpc_minor_version=2,
         tpc_patch_version=0,
-        tpc_platform_type=IMX500_TP_MODEL,
+        tpc_platform_type=IMX500_TPC,
         operator_set=tuple(operator_set),
         fusing_patterns=tuple(fusing_patterns),
         add_metadata=True,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v2/tp_model.py b/tests/common_tests/helpers/tpcs_for_tests/v2/tpc.py
similarity index 95%
rename from tests/common_tests/helpers/tpcs_for_tests/v2/tp_model.py
rename to tests/common_tests/helpers/tpcs_for_tests/v2/tpc.py
index 1f2abc67b..89aa63f82 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v2/tp_model.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v2/tpc.py
@@ -18,7 +18,7 @@
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
-    IMX500_TP_MODEL
+    IMX500_TPC
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
@@ -26,23 +26,23 @@
 tp = mct.target_platform
 
 
-def get_tp_model() -> TargetPlatformCapabilities:
+def get_tpc() -> TargetPlatformCapabilities:
     """
     A method that generates a default target platform model, with base 8-bit quantization configuration and 8, 4, 2
     bits configuration list for mixed-precision quantization.
     NOTE: in order to generate a target platform model with different configurations but with the same Operators Sets
     (for tests, experiments, etc.), use this method implementation as a test-case, i.e., override the
-    'get_op_quantization_configs' method and use its output to call 'generate_tp_model' with your configurations.
+    'get_op_quantization_configs' method and use its output to call 'generate_tpc' with your configurations.
     This version enables metadata by default.
 
     Returns: A TargetPlatformCapabilities object.
 
     """
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
-    return generate_tp_model(default_config=default_config,
-                             base_config=base_config,
-                             mixed_precision_cfg_list=mixed_precision_cfg_list,
-                             name='imx500_tp_model')
+    return generate_tpc(default_config=default_config,
+                        base_config=base_config,
+                        mixed_precision_cfg_list=mixed_precision_cfg_list,
+                        name='imx500_tpc')
 
 
 def get_op_quantization_configs() -> \
@@ -59,7 +59,7 @@ def get_op_quantization_configs() -> \
     # TODO: currently, we don't want to quantize any attribute but the kernel by default,
     #  to preserve the current behavior of MCT, so quantization is disabled for all other attributes.
     #  Other quantization parameters are set to what we eventually want to quantize by default
-    #  when we enable multi-attributes quantization - THIS NEED TO BE MODIFIED IN ALL TP MODELS!
+    #  when we enable multi-attributes quantization - THIS NEEDS TO BE MODIFIED IN ALL TPCS!
 
     # define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
@@ -134,18 +134,18 @@ def get_op_quantization_configs() -> \
     return linear_eight_bits, mixed_precision_cfg_list, eight_bits_default
 
 
-def generate_tp_model(default_config: OpQuantizationConfig,
-                      base_config: OpQuantizationConfig,
-                      mixed_precision_cfg_list: List[OpQuantizationConfig],
-                      name: str) -> TargetPlatformCapabilities:
+def generate_tpc(default_config: OpQuantizationConfig,
+                 base_config: OpQuantizationConfig,
+                 mixed_precision_cfg_list: List[OpQuantizationConfig],
+                 name: str) -> TargetPlatformCapabilities:
     """
     Generates TargetPlatformCapabilities with default defined Operators Sets, based on the given base configuration and
     mixed-precision configurations options list.
 
     Args
-        default_config: A default OpQuantizationConfig to set as the TP model default configuration.
+        default_config: A default OpQuantizationConfig to set as the TPC default configuration.
         base_config: An OpQuantizationConfig to set as the TargetPlatformCapabilities base configuration for mixed-precision purposes only.
-        mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TP model mixed-precision
+        mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TPC mixed-precision
             quantization configuration options.
         name: The name of the TargetPlatformCapabilities.
 
@@ -253,7 +253,7 @@ def generate_tp_model(default_config: OpQuantizationConfig,
         default_qco=default_configuration_options,
         tpc_minor_version=2,
         tpc_patch_version=0,
-        tpc_platform_type=IMX500_TP_MODEL,
+        tpc_platform_type=IMX500_TPC,
         operator_set=tuple(operator_set),
         fusing_patterns=tuple(fusing_patterns),
         add_metadata=True,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tp_model.py b/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tpc.py
similarity index 95%
rename from tests/common_tests/helpers/tpcs_for_tests/v2_lut/tp_model.py
rename to tests/common_tests/helpers/tpcs_for_tests/v2_lut/tpc.py
index d951c6da8..6eb1fe242 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tp_model.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tpc.py
@@ -18,7 +18,7 @@
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
-    WEIGHTS_QUANTIZATION_METHOD, IMX500_TP_MODEL
+    WEIGHTS_QUANTIZATION_METHOD, IMX500_TPC
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
@@ -26,23 +26,23 @@
 tp = mct.target_platform
 
 
-def get_tp_model() -> TargetPlatformCapabilities:
+def get_tpc() -> TargetPlatformCapabilities:
     """
     A method that generates a default target platform model, with base 8-bit quantization configuration and 8, 4, 2
     bits configuration list for mixed-precision quantization.
     NOTE: in order to generate a target platform model with different configurations but with the same Operators Sets
     (for tests, experiments, etc.), use this method implementation as a test-case, i.e., override the
-    'get_op_quantization_configs' method and use its output to call 'generate_tp_model' with your configurations.
+    'get_op_quantization_configs' method and use its output to call 'generate_tpc' with your configurations.
     This version enables metadata by default.
 
     Returns: A TargetPlatformCapabilities object.
 
     """
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
-    return generate_tp_model(default_config=default_config,
-                             base_config=base_config,
-                             mixed_precision_cfg_list=mixed_precision_cfg_list,
-                             name='imx500_lut_tp_model')
+    return generate_tpc(default_config=default_config,
+                        base_config=base_config,
+                        mixed_precision_cfg_list=mixed_precision_cfg_list,
+                        name='imx500_lut_tpc')
 
 
 def get_op_quantization_configs() -> \
@@ -131,18 +131,18 @@ def get_op_quantization_configs() -> \
     return linear_eight_bits, mixed_precision_cfg_list, eight_bits_default
 
 
-def generate_tp_model(default_config: OpQuantizationConfig,
-                      base_config: OpQuantizationConfig,
-                      mixed_precision_cfg_list: List[OpQuantizationConfig],
-                      name: str) -> TargetPlatformCapabilities:
+def generate_tpc(default_config: OpQuantizationConfig,
+                 base_config: OpQuantizationConfig,
+                 mixed_precision_cfg_list: List[OpQuantizationConfig],
+                 name: str) -> TargetPlatformCapabilities:
     """
     Generates TargetPlatformCapabilities with default defined Operators Sets, based on the given base configuration and
     mixed-precision configurations options list.
 
     Args
-        default_config: A default OpQuantizationConfig to set as the TP model default configuration.
+        default_config: A default OpQuantizationConfig to set as the TPC default configuration.
         base_config: An OpQuantizationConfig to set as the TargetPlatformCapabilities base configuration for mixed-precision purposes only.
-        mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TP model mixed-precision
+        mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TPC mixed-precision
             quantization configuration options.
         name: The name of the TargetPlatformCapabilities.
 
@@ -276,7 +276,7 @@ def generate_tp_model(default_config: OpQuantizationConfig,
         default_qco=default_configuration_options,
         tpc_minor_version=2,
         tpc_patch_version=0,
-        tpc_platform_type=IMX500_TP_MODEL,
+        tpc_platform_type=IMX500_TPC,
         operator_set=tuple(operator_set),
         fusing_patterns=tuple(fusing_patterns),
         add_metadata=True,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v3/tp_model.py b/tests/common_tests/helpers/tpcs_for_tests/v3/tpc.py
similarity index 95%
rename from tests/common_tests/helpers/tpcs_for_tests/v3/tp_model.py
rename to tests/common_tests/helpers/tpcs_for_tests/v3/tpc.py
index 69a19f8be..95e1c6786 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v3/tp_model.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v3/tpc.py
@@ -18,7 +18,7 @@
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
-    IMX500_TP_MODEL
+    IMX500_TPC
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
@@ -26,23 +26,23 @@
 tp = mct.target_platform
 
 
-def get_tp_model() -> TargetPlatformCapabilities:
+def get_tpc() -> TargetPlatformCapabilities:
     """
     A method that generates a default target platform model, with base 8-bit quantization configuration and 8, 4, 2
     bits configuration list for mixed-precision quantization.
     NOTE: in order to generate a target platform model with different configurations but with the same Operators Sets
     (for tests, experiments, etc.), use this method implementation as a test-case, i.e., override the
-    'get_op_quantization_configs' method and use its output to call 'generate_tp_model' with your configurations.
+    'get_op_quantization_configs' method and use its output to call 'generate_tpc' with your configurations.
     This version enables metadata by default.
 
     Returns: A TargetPlatformCapabilities object.
 
     """
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
-    return generate_tp_model(default_config=default_config,
-                             base_config=base_config,
-                             mixed_precision_cfg_list=mixed_precision_cfg_list,
-                             name='imx500_tp_model')
+    return generate_tpc(default_config=default_config,
+                        base_config=base_config,
+                        mixed_precision_cfg_list=mixed_precision_cfg_list,
+                        name='imx500_tpc')
 
 
 def get_op_quantization_configs() -> \
@@ -59,7 +59,7 @@ def get_op_quantization_configs() -> \
     # TODO: currently, we don't want to quantize any attribute but the kernel by default,
     #  to preserve the current behavior of MCT, so quantization is disabled for all other attributes.
     #  Other quantization parameters are set to what we eventually want to quantize by default
-    #  when we enable multi-attributes quantization - THIS NEED TO BE MODIFIED IN ALL TP MODELS!
+    #  when we enable multi-attributes quantization - THIS NEEDS TO BE MODIFIED IN ALL TPCS!
 
     # define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
@@ -134,18 +134,18 @@ def get_op_quantization_configs() -> \
     return linear_eight_bits, mixed_precision_cfg_list, eight_bits_default
 
 
-def generate_tp_model(default_config: OpQuantizationConfig,
-                      base_config: OpQuantizationConfig,
-                      mixed_precision_cfg_list: List[OpQuantizationConfig],
-                      name: str) -> TargetPlatformCapabilities:
+def generate_tpc(default_config: OpQuantizationConfig,
+                 base_config: OpQuantizationConfig,
+                 mixed_precision_cfg_list: List[OpQuantizationConfig],
+                 name: str) -> TargetPlatformCapabilities:
     """
     Generates TargetPlatformCapabilities with default defined Operators Sets, based on the given base configuration and
     mixed-precision configurations options list.
 
     Args
-        default_config: A default OpQuantizationConfig to set as the TP model default configuration.
+        default_config: A default OpQuantizationConfig to set as the TPC default configuration.
         base_config: An OpQuantizationConfig to set as the TargetPlatformCapabilities base configuration for mixed-precision purposes only.
-        mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TP model mixed-precision
+        mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TPC mixed-precision
             quantization configuration options.
         name: The name of the TargetPlatformCapabilities.
 
@@ -283,7 +283,7 @@ def generate_tp_model(default_config: OpQuantizationConfig,
         default_qco=default_configuration_options,
         tpc_minor_version=3,
         tpc_patch_version=0,
-        tpc_platform_type=IMX500_TP_MODEL,
+        tpc_platform_type=IMX500_TPC,
         operator_set=tuple(operator_set),
         fusing_patterns=tuple(fusing_patterns),
         add_metadata=True,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tp_model.py b/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tpc.py
similarity index 95%
rename from tests/common_tests/helpers/tpcs_for_tests/v3_lut/tp_model.py
rename to tests/common_tests/helpers/tpcs_for_tests/v3_lut/tpc.py
index 20de769e7..530ea55d3 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tp_model.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tpc.py
@@ -18,7 +18,7 @@
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
-    WEIGHTS_QUANTIZATION_METHOD, IMX500_TP_MODEL
+    WEIGHTS_QUANTIZATION_METHOD, IMX500_TPC
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
@@ -26,23 +26,23 @@
 tp = mct.target_platform
 
 
-def get_tp_model() -> TargetPlatformCapabilities:
+def get_tpc() -> TargetPlatformCapabilities:
     """
     A method that generates a default target platform model, with base 8-bit quantization configuration and 8, 4, 2
     bits configuration list for mixed-precision quantization.
     NOTE: in order to generate a target platform model with different configurations but with the same Operators Sets
     (for tests, experiments, etc.), use this method implementation as a test-case, i.e., override the
-    'get_op_quantization_configs' method and use its output to call 'generate_tp_model' with your configurations.
+    'get_op_quantization_configs' method and use its output to call 'generate_tpc' with your configurations.
     This version enables metadata by default.
 
     Returns: A TargetPlatformCapabilities object.
 
     """
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
-    return generate_tp_model(default_config=default_config,
-                             base_config=base_config,
-                             mixed_precision_cfg_list=mixed_precision_cfg_list,
-                             name='imx500_lut_tp_model')
+    return generate_tpc(default_config=default_config,
+                        base_config=base_config,
+                        mixed_precision_cfg_list=mixed_precision_cfg_list,
+                        name='imx500_lut_tpc')
 
 
 def get_op_quantization_configs() -> \
@@ -131,18 +131,18 @@ def get_op_quantization_configs() -> \
     return linear_eight_bits, mixed_precision_cfg_list, eight_bits_default
 
 
-def generate_tp_model(default_config: OpQuantizationConfig,
-                      base_config: OpQuantizationConfig,
-                      mixed_precision_cfg_list: List[OpQuantizationConfig],
-                      name: str) -> TargetPlatformCapabilities:
+def generate_tpc(default_config: OpQuantizationConfig,
+                 base_config: OpQuantizationConfig,
+                 mixed_precision_cfg_list: List[OpQuantizationConfig],
+                 name: str) -> TargetPlatformCapabilities:
     """
     Generates TargetPlatformCapabilities with default defined Operators Sets, based on the given base configuration and
     mixed-precision configurations options list.
 
     Args
-        default_config: A default OpQuantizationConfig to set as the TP model default configuration.
+        default_config: A default OpQuantizationConfig to set as the TPC default configuration.
         base_config: An OpQuantizationConfig to set as the TargetPlatformCapabilities base configuration for mixed-precision purposes only.
-        mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TP model mixed-precision
+        mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TPC mixed-precision
             quantization configuration options.
         name: The name of the TargetPlatformCapabilities.
 
@@ -308,7 +308,7 @@ def generate_tp_model(default_config: OpQuantizationConfig,
         default_qco=default_configuration_options,
         tpc_minor_version=3,
         tpc_patch_version=0,
-        tpc_platform_type=IMX500_TP_MODEL,
+        tpc_platform_type=IMX500_TPC,
         operator_set=tuple(operator_set),
         fusing_patterns=tuple(fusing_patterns),
         add_metadata=True,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v4/tp_model.py b/tests/common_tests/helpers/tpcs_for_tests/v4/tpc.py
similarity index 96%
rename from tests/common_tests/helpers/tpcs_for_tests/v4/tp_model.py
rename to tests/common_tests/helpers/tpcs_for_tests/v4/tpc.py
index 7389aecf6..64a5944c5 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v4/tp_model.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v4/tpc.py
@@ -18,30 +18,30 @@
 import model_compression_toolkit.target_platform_capabilities.schema.v1 as schema
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
-    IMX500_TP_MODEL
+    IMX500_TPC
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
 tp = mct.target_platform
 
-def get_tp_model() -> TargetPlatformCapabilities:
+def get_tpc() -> TargetPlatformCapabilities:
     """
     A method that generates a default target platform model, with base 8-bit quantization configuration and 8, 4, 2
     bits configuration list for mixed-precision quantization.
     NOTE: in order to generate a target platform model with different configurations but with the same Operators Sets
     (for tests, experiments, etc.), use this method implementation as a test-case, i.e., override the
-    'get_op_quantization_configs' method and use its output to call 'generate_tp_model' with your configurations.
+    'get_op_quantization_configs' method and use its output to call 'generate_tpc' with your configurations.
     This version enables metadata by default.
 
     Returns: A TargetPlatformCapabilities object.
 
     """
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
-    return generate_tp_model(default_config=default_config,
-                             base_config=base_config,
-                             mixed_precision_cfg_list=mixed_precision_cfg_list,
-                             name='imx500_tp_model')
+    return generate_tpc(default_config=default_config,
+                        base_config=base_config,
+                        mixed_precision_cfg_list=mixed_precision_cfg_list,
+                        name='imx500_tpc')
 
 
 def get_op_quantization_configs() -> \
@@ -58,7 +58,7 @@ def get_op_quantization_configs() -> \
     # TODO: currently, we don't want to quantize any attribute but the kernel by default,
     #  to preserve the current behavior of MCT, so quantization is disabled for all other attributes.
     #  Other quantization parameters are set to what we eventually want to quantize by default
-    #  when we enable multi-attributes quantization - THIS NEED TO BE MODIFIED IN ALL TP MODELS!
+    #  when we enable multi-attributes quantization - THIS NEEDS TO BE MODIFIED IN ALL TPCS!
 
     # define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
@@ -133,18 +133,18 @@ def get_op_quantization_configs() -> \
     return linear_eight_bits, mixed_precision_cfg_list, eight_bits_default
 
 
-def generate_tp_model(default_config: OpQuantizationConfig,
-                      base_config: OpQuantizationConfig,
-                      mixed_precision_cfg_list: List[OpQuantizationConfig],
-                      name: str) -> TargetPlatformCapabilities:
+def generate_tpc(default_config: OpQuantizationConfig,
+                 base_config: OpQuantizationConfig,
+                 mixed_precision_cfg_list: List[OpQuantizationConfig],
+                 name: str) -> TargetPlatformCapabilities:
     """
     Generates TargetPlatformCapabilities with default defined Operators Sets, based on the given base configuration and
     mixed-precision configurations options list.
 
     Args
-        default_config: A default OpQuantizationConfig to set as the TP model default configuration.
+        default_config: A default OpQuantizationConfig to set as the TPC default configuration.
         base_config: An OpQuantizationConfig to set as the TargetPlatformCapabilities base configuration for mixed-precision purposes only.
-        mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TP model mixed-precision
+        mixed_precision_cfg_list: A list of OpQuantizationConfig to be used as the TPC mixed-precision
             quantization configuration options.
         name: The name of the TargetPlatformCapabilities.
 
@@ -317,7 +317,7 @@ def generate_tp_model(default_config: OpQuantizationConfig,
         default_qco=default_configuration_options,
         tpc_minor_version=4,
         tpc_patch_version=0,
-        tpc_platform_type=IMX500_TP_MODEL,
+        tpc_platform_type=IMX500_TPC,
         operator_set=tuple(operator_set),
         fusing_patterns=tuple(fusing_patterns),
         add_metadata=True,
diff --git a/tests/common_tests/test_tp_model.py b/tests/common_tests/test_tpc.py
similarity index 92%
rename from tests/common_tests/test_tp_model.py
rename to tests/common_tests/test_tpc.py
index 6d7a084b4..9802152a1 100644
--- a/tests/common_tests/test_tp_model.py
+++ b/tests/common_tests/test_tpc.py
@@ -24,7 +24,7 @@
     get_config_options_by_operators_set, is_opset_in_model
 from model_compression_toolkit.target_platform_capabilities.tpc_io_handler import load_target_platform_model, \
     export_target_platform_model
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_attr_configs, generate_test_op_qc
+from tests.common_tests.helpers.generate_test_tpc import generate_test_attr_configs, generate_test_op_qc
 
 tp = mct.target_platform
 
@@ -45,14 +45,14 @@ def setUp(self):
         op2 = schema.OperatorsSet(name="opset2")
         op3 = schema.OperatorsSet(name="opset3")
         op12 = schema.OperatorSetGroup(operators_set=[op1, op2])
-        self.tp_model = schema.TargetPlatformCapabilities(default_qco=TEST_QCO,
-                                                          operator_set=(op1, op2, op3),
-                                                          fusing_patterns=(schema.Fusing(operator_groups=(op12, op3)),
+        self.tpc = schema.TargetPlatformCapabilities(default_qco=TEST_QCO,
+                                                     operator_set=(op1, op2, op3),
+                                                     fusing_patterns=(schema.Fusing(operator_groups=(op12, op3)),
                                                                     schema.Fusing(operator_groups=(op1, op2))),
-                                                          tpc_minor_version=1,
-                                                          tpc_patch_version=0,
-                                                          tpc_platform_type="dump_to_json",
-                                                          add_metadata=False)
+                                                     tpc_minor_version=1,
+                                                     tpc_patch_version=0,
+                                                     tpc_platform_type="dump_to_json",
+                                                     add_metadata=False)
 
         # Create invalid JSON file
         with open(self.invalid_json_file, "w") as file:
@@ -66,8 +66,8 @@ def tearDown(self):
 
     def test_valid_model_object(self):
         """Test that a valid TargetPlatformCapabilities object is returned unchanged."""
-        result = load_target_platform_model(self.tp_model)
-        self.assertEqual(self.tp_model, result)
+        result = load_target_platform_model(self.tpc)
+        self.assertEqual(self.tpc, result)
 
     def test_invalid_json_parsing(self):
         """Test that invalid JSON content raises a ValueError."""
@@ -102,14 +102,14 @@ def test_invalid_input_type(self):
 
     def test_valid_export(self):
         """Test exporting a valid TargetPlatformCapabilities instance to a file."""
-        export_target_platform_model(self.tp_model, self.valid_export_path)
+        export_target_platform_model(self.tpc, self.valid_export_path)
         # Verify the file exists
         self.assertTrue(os.path.exists(self.valid_export_path))
 
         # Verify the contents match the model's JSON representation
         with open(self.valid_export_path, "r", encoding="utf-8") as file:
             content = file.read()
-        self.assertEqual(content, self.tp_model.json(indent=4))
+        self.assertEqual(content, self.tpc.json(indent=4))
 
     def test_export_with_invalid_model(self):
         """Test that exporting an invalid model raises a ValueError."""
@@ -120,21 +120,21 @@ def test_export_with_invalid_model(self):
     def test_export_with_invalid_path(self):
         """Test that exporting to an invalid path raises an OSError."""
         with self.assertRaises(OSError) as context:
-            export_target_platform_model(self.tp_model, self.invalid_export_path)
+            export_target_platform_model(self.tpc, self.invalid_export_path)
         self.assertIn("Failed to write to file", str(context.exception))
 
     def test_export_creates_parent_directories(self):
         """Test that exporting creates missing parent directories."""
         nested_path = "nested/directory/exported_model.json"
         try:
-            export_target_platform_model(self.tp_model, nested_path)
+            export_target_platform_model(self.tpc, nested_path)
             # Verify the file exists
             self.assertTrue(os.path.exists(nested_path))
 
             # Verify the contents match the model's JSON representation
             with open(nested_path, "r", encoding="utf-8") as file:
                 content = file.read()
-            self.assertEqual(content, self.tp_model.json(indent=4))
+            self.assertEqual(content, self.tpc.json(indent=4))
         finally:
             # Cleanup created directories
             if os.path.exists(nested_path):
@@ -146,9 +146,9 @@ def test_export_creates_parent_directories(self):
 
     def test_export_then_import(self):
         """Test that a model exported and then imported is identical."""
-        export_target_platform_model(self.tp_model, self.valid_export_path)
+        export_target_platform_model(self.tpc, self.valid_export_path)
         imported_model = load_target_platform_model(self.valid_export_path)
-        self.assertEqual(self.tp_model, imported_model)
+        self.assertEqual(self.tpc, imported_model)
 
 class TargetPlatformModelingTest(unittest.TestCase):
     def test_immutable_tp(self):
@@ -173,7 +173,7 @@ def test_default_options_more_than_single_qc(self):
                                               add_metadata=False)
         self.assertEqual('Default QuantizationConfigOptions must contain exactly one option.', str(e.exception))
 
-    def test_tp_model_show(self):
+    def test_tpc_show(self):
         tpm = schema.TargetPlatformCapabilities(default_qco=TEST_QCO,
                                                 tpc_minor_version=None,
                                                 tpc_patch_version=None,
diff --git a/tests/external_tests/keras_tests/models_tests/test_networks_runner.py b/tests/external_tests/keras_tests/models_tests/test_networks_runner.py
index 5c3e5dc07..fcfbe836b 100644
--- a/tests/external_tests/keras_tests/models_tests/test_networks_runner.py
+++ b/tests/external_tests/keras_tests/models_tests/test_networks_runner.py
@@ -25,7 +25,7 @@
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
 from model_compression_toolkit.gptq.keras.gptq_loss import multiple_tensors_mse_loss
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.common_tests.helpers.tensors_compare import cosine_similarity
 from tests.keras_tests.tpc_keras import get_16bit_tpc
 
@@ -38,11 +38,11 @@
                                                   relu_bound_to_power_of_2=False, weights_bias_correction=False)
 
 TWO_BIT_QUANTIZATION = generate_keras_tpc(name="two_bit_network_test",
-                                          tp_model=generate_test_tp_model({'weights_n_bits': 2,
+                                          tpc=generate_test_tpc({'weights_n_bits': 2,
                                                                            'activation_n_bits': 2}))
 
 EIGHT_BIT_QUANTIZATION = generate_keras_tpc(name="eight_bit_network_test",
-                                            tp_model=generate_test_tp_model({'weights_n_bits': 8,
+                                            tpc=generate_test_tpc({'weights_n_bits': 8,
                                                                              'activation_n_bits': 8}))
 
 FLOAT_QUANTIZATION = get_16bit_tpc("float_network_test")
diff --git a/tests/external_tests/keras_tests/models_tests/test_networks_runner_float.py b/tests/external_tests/keras_tests/models_tests/test_networks_runner_float.py
index e5ca3e3da..0e0ae20ae 100644
--- a/tests/external_tests/keras_tests/models_tests/test_networks_runner_float.py
+++ b/tests/external_tests/keras_tests/models_tests/test_networks_runner_float.py
@@ -26,7 +26,7 @@
 from model_compression_toolkit.core.common.quantization.set_node_quantization_config import \
     set_quantization_configuration_to_graph
 from model_compression_toolkit.core.common.substitutions.apply_substitutions import substitute
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
 from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
 from model_compression_toolkit.core.keras.reader.reader import model_reader
@@ -56,7 +56,7 @@ def run_network(self, inputs_list):
         fw_impl = KerasImplementation()
         fw_info = DEFAULT_KERAS_INFO
 
-        keras_default_tpc = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
+        keras_default_tpc = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
 
         graph = model_reader(self.model_float)  # model reading
         graph.set_fw_info(DEFAULT_KERAS_INFO)
diff --git a/tests/keras_tests/custom_layers_tests/test_sony_ssd_postprocess_layer.py b/tests/keras_tests/custom_layers_tests/test_sony_ssd_postprocess_layer.py
index 14c19057b..e3726425b 100644
--- a/tests/keras_tests/custom_layers_tests/test_sony_ssd_postprocess_layer.py
+++ b/tests/keras_tests/custom_layers_tests/test_sony_ssd_postprocess_layer.py
@@ -20,7 +20,7 @@
 import model_compression_toolkit as mct
 from sony_custom_layers.keras.object_detection.ssd_post_process import SSDPostProcess
 from mct_quantizers.keras.metadata import MetadataLayer
-from tests.common_tests.helpers.tpcs_for_tests.v4.tp_model import get_tp_model
+from tests.common_tests.helpers.tpcs_for_tests.v4.tpc import get_tpc
 
 keras = tf.keras
 layers = keras.layers
@@ -56,7 +56,7 @@ def test_custom_layer(self):
                                                               get_rep_dataset(2, (1, 8, 8, 3)),
                                                               core_config=core_config,
                                                               target_resource_utilization=mct.core.ResourceUtilization(weights_memory=6000),
-                                                              target_platform_capabilities=get_tp_model()
+                                                              target_platform_capabilities=get_tpc()
                                                               )
 
         # verify the custom layer is in the quantized model
diff --git a/tests/keras_tests/exporter_tests/keras_fake_quant/keras_fake_quant_exporter_base_test.py b/tests/keras_tests/exporter_tests/keras_fake_quant/keras_fake_quant_exporter_base_test.py
index 629af64a3..864714cc9 100644
--- a/tests/keras_tests/exporter_tests/keras_fake_quant/keras_fake_quant_exporter_base_test.py
+++ b/tests/keras_tests/exporter_tests/keras_fake_quant/keras_fake_quant_exporter_base_test.py
@@ -23,7 +23,7 @@
 import model_compression_toolkit as mct
 from model_compression_toolkit import get_target_platform_capabilities
 from model_compression_toolkit.constants import TENSORFLOW
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
 from model_compression_toolkit.exporter.model_exporter.keras.base_keras_exporter import DEFAULT_KERAS_EXPORT_EXTENTION
 
 def get_minmax_from_qparams(qparams):
@@ -82,7 +82,7 @@ def get_input_shape(self):
         return [(16, 16, 3)]
 
     def get_tpc(self):
-        return get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
+        return get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
 
     def get_quantization_config(self):
         return mct.core.QuantizationConfig()
diff --git a/tests/keras_tests/exporter_tests/keras_fake_quant/networks/conv2d_test.py b/tests/keras_tests/exporter_tests/keras_fake_quant/networks/conv2d_test.py
index 803a9bffa..33a6a7e6b 100644
--- a/tests/keras_tests/exporter_tests/keras_fake_quant/networks/conv2d_test.py
+++ b/tests/keras_tests/exporter_tests/keras_fake_quant/networks/conv2d_test.py
@@ -26,7 +26,7 @@
 
 from mct_quantizers import QuantizationMethod
 from mct_quantizers.keras.quantizers import WeightsLUTSymmetricInferableQuantizer
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.keras_tests.exporter_tests.keras_fake_quant.keras_fake_quant_exporter_base_test import \
     KerasFakeQuantExporterBaseTest, get_minmax_from_qparams
@@ -37,9 +37,9 @@ def get_input_shape(self):
         return [(30, 30, 3)]
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 2,
+        tp = generate_test_tpc({'weights_n_bits': 2,
                                      'activation_n_bits': 2})
-        return generate_keras_tpc(name="test_conv2d_2bit_fq_weight", tp_model=tp)
+        return generate_keras_tpc(name="test_conv2d_2bit_fq_weight", tpc=tp)
 
     def get_model(self):
         inputs = Input(shape=self.get_input_shape()[0])
@@ -113,9 +113,9 @@ def get_input_shape(self):
         return [(30, 30, 3)]
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 2,
+        tp = generate_test_tpc({'weights_n_bits': 2,
                                      'weights_quantization_method': QuantizationMethod.LUT_SYM_QUANTIZER})
-        return generate_keras_tpc(name="test_conv2d_2bit_lut_fq_weight", tp_model=tp)
+        return generate_keras_tpc(name="test_conv2d_2bit_lut_fq_weight", tpc=tp)
 
     def get_model(self):
         inputs = Input(shape=self.get_input_shape()[0])
diff --git a/tests/keras_tests/exporter_tests/keras_fake_quant/networks/conv2dtranspose_test.py b/tests/keras_tests/exporter_tests/keras_fake_quant/networks/conv2dtranspose_test.py
index 1ea9a7288..111aba46e 100644
--- a/tests/keras_tests/exporter_tests/keras_fake_quant/networks/conv2dtranspose_test.py
+++ b/tests/keras_tests/exporter_tests/keras_fake_quant/networks/conv2dtranspose_test.py
@@ -23,7 +23,7 @@
 import numpy as np
 import tensorflow as tf
 
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.keras_tests.exporter_tests.keras_fake_quant.keras_fake_quant_exporter_base_test import \
     KerasFakeQuantExporterBaseTest, get_minmax_from_qparams
@@ -35,9 +35,9 @@ def get_input_shape(self):
         return [(30, 30, 3)]
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 2,
+        tp = generate_test_tpc({'weights_n_bits': 2,
                                      'activation_n_bits': 2})
-        return generate_keras_tpc(name="test_conv2d_2bit_fq_weight", tp_model=tp)
+        return generate_keras_tpc(name="test_conv2d_2bit_fq_weight", tpc=tp)
 
     def get_model(self):
         inputs = Input(shape=self.get_input_shape()[0])
diff --git a/tests/keras_tests/exporter_tests/keras_fake_quant/networks/dense_test.py b/tests/keras_tests/exporter_tests/keras_fake_quant/networks/dense_test.py
index a4b9a39ee..682d9b644 100644
--- a/tests/keras_tests/exporter_tests/keras_fake_quant/networks/dense_test.py
+++ b/tests/keras_tests/exporter_tests/keras_fake_quant/networks/dense_test.py
@@ -23,7 +23,7 @@
 import numpy as np
 import tensorflow as tf
 
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.keras_tests.exporter_tests.keras_fake_quant.keras_fake_quant_exporter_base_test import \
     KerasFakeQuantExporterBaseTest, get_minmax_from_qparams
@@ -35,9 +35,9 @@ def get_input_shape(self):
         return [(2, 2, 50)]
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 2,
+        tp = generate_test_tpc({'weights_n_bits': 2,
                                      'activation_n_bits': 2})
-        return generate_keras_tpc(name="test_conv2d_2bit_fq_weight", tp_model=tp)
+        return generate_keras_tpc(name="test_conv2d_2bit_fq_weight", tpc=tp)
 
     def get_model(self):
         inputs = Input(shape=self.get_input_shape()[0])
diff --git a/tests/keras_tests/exporter_tests/keras_fake_quant/networks/dwconv2d_test.py b/tests/keras_tests/exporter_tests/keras_fake_quant/networks/dwconv2d_test.py
index a351aeb49..c08fa8e41 100644
--- a/tests/keras_tests/exporter_tests/keras_fake_quant/networks/dwconv2d_test.py
+++ b/tests/keras_tests/exporter_tests/keras_fake_quant/networks/dwconv2d_test.py
@@ -25,7 +25,7 @@
 import numpy as np
 import tensorflow as tf
 
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.keras_tests.exporter_tests.keras_fake_quant.keras_fake_quant_exporter_base_test import \
     KerasFakeQuantExporterBaseTest, get_minmax_from_qparams
@@ -37,9 +37,9 @@ def get_input_shape(self):
         return [(30, 30, 3)]
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 2,
+        tp = generate_test_tpc({'weights_n_bits': 2,
                                      'activation_n_bits': 2})
-        return generate_keras_tpc(name="test_conv2d_2bit_fq_weight", tp_model=tp)
+        return generate_keras_tpc(name="test_conv2d_2bit_fq_weight", tpc=tp)
 
     def get_model(self):
         inputs = Input(shape=self.get_input_shape()[0])
diff --git a/tests/keras_tests/exporter_tests/keras_fake_quant/networks/multiple_inputs_test.py b/tests/keras_tests/exporter_tests/keras_fake_quant/networks/multiple_inputs_test.py
index 0bac62b07..c013a72b5 100644
--- a/tests/keras_tests/exporter_tests/keras_fake_quant/networks/multiple_inputs_test.py
+++ b/tests/keras_tests/exporter_tests/keras_fake_quant/networks/multiple_inputs_test.py
@@ -21,7 +21,7 @@
 else:
     from keras.layers import Conv2D, Add
 
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.keras_tests.exporter_tests.keras_fake_quant.keras_fake_quant_exporter_base_test import \
     KerasFakeQuantExporterBaseTest
@@ -33,8 +33,8 @@ def get_input_shape(self):
         return [(30, 30, 3), (28, 28, 3)]
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 2})
-        return generate_keras_tpc(name="test_conv2d_2bit_fq_weight", tp_model=tp)
+        tp = generate_test_tpc({'weights_n_bits': 2})
+        return generate_keras_tpc(name="test_conv2d_2bit_fq_weight", tpc=tp)
 
     def get_model(self):
         inputs1 = Input(shape=self.get_input_shape()[0])
diff --git a/tests/keras_tests/exporter_tests/keras_fake_quant/networks/no_quant_test.py b/tests/keras_tests/exporter_tests/keras_fake_quant/networks/no_quant_test.py
index a90f5b682..fac4fa0c3 100644
--- a/tests/keras_tests/exporter_tests/keras_fake_quant/networks/no_quant_test.py
+++ b/tests/keras_tests/exporter_tests/keras_fake_quant/networks/no_quant_test.py
@@ -22,7 +22,7 @@
     from keras.layers import Conv2D
 import numpy as np
 
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.keras_tests.exporter_tests.keras_fake_quant.keras_fake_quant_exporter_base_test import \
     KerasFakeQuantExporterBaseTest
@@ -34,9 +34,9 @@ def get_input_shape(self):
         return [(30, 30, 3)]
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'enable_weights_quantization': False,
+        tp = generate_test_tpc({'enable_weights_quantization': False,
                                      'enable_activation_quantization': False})
-        return generate_keras_tpc(name="test_no_quant", tp_model=tp)
+        return generate_keras_tpc(name="test_no_quant", tpc=tp)
 
     def get_model(self):
         inputs = Input(shape=self.get_input_shape()[0])
diff --git a/tests/keras_tests/exporter_tests/keras_mctq/networks/conv2d_test.py b/tests/keras_tests/exporter_tests/keras_mctq/networks/conv2d_test.py
index a50f9f116..43b8e7871 100644
--- a/tests/keras_tests/exporter_tests/keras_mctq/networks/conv2d_test.py
+++ b/tests/keras_tests/exporter_tests/keras_mctq/networks/conv2d_test.py
@@ -25,7 +25,7 @@
     from keras.layers import Conv2D
 
 from mct_quantizers import QuantizationMethod
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.keras_tests.exporter_tests.keras_fake_quant.keras_fake_quant_exporter_base_test import \
     KerasFakeQuantExporterBaseTest
@@ -36,9 +36,9 @@ def get_input_shape(self):
         return [(30, 30, 3)]
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 2,
+        tp = generate_test_tpc({'weights_n_bits': 2,
                                      'activation_n_bits': 2})
-        return generate_keras_tpc(name="test_conv2d_2bit_fq_weight", tp_model=tp)
+        return generate_keras_tpc(name="test_conv2d_2bit_fq_weight", tpc=tp)
 
     def get_model(self):
         inputs = Input(shape=self.get_input_shape()[0])
@@ -68,9 +68,9 @@ def get_input_shape(self):
         return [(30, 30, 3)]
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 2,
+        tp = generate_test_tpc({'weights_n_bits': 2,
                                      'weights_quantization_method': QuantizationMethod.LUT_SYM_QUANTIZER})
-        return generate_keras_tpc(name="test_conv2d_2bit_lut_fq_weight", tp_model=tp)
+        return generate_keras_tpc(name="test_conv2d_2bit_lut_fq_weight", tpc=tp)
 
     def get_model(self):
         inputs = Input(shape=self.get_input_shape()[0])
diff --git a/tests/keras_tests/exporter_tests/keras_mctq/networks/conv2dtranspose_test.py b/tests/keras_tests/exporter_tests/keras_mctq/networks/conv2dtranspose_test.py
index 8a4721ce5..297a4a2f4 100644
--- a/tests/keras_tests/exporter_tests/keras_mctq/networks/conv2dtranspose_test.py
+++ b/tests/keras_tests/exporter_tests/keras_mctq/networks/conv2dtranspose_test.py
@@ -26,7 +26,7 @@
 import numpy as np
 import tensorflow as tf
 
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.keras_tests.exporter_tests.keras_fake_quant.keras_fake_quant_exporter_base_test import \
     get_minmax_from_qparams
@@ -38,9 +38,9 @@ def get_input_shape(self):
         return [(30, 30, 3)]
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 2,
+        tp = generate_test_tpc({'weights_n_bits': 2,
                                      'activation_n_bits': 2})
-        return generate_keras_tpc(name="test_conv2d_2bit_fq_weight", tp_model=tp)
+        return generate_keras_tpc(name="test_conv2d_2bit_fq_weight", tpc=tp)
 
     def get_model(self):
         inputs = Input(shape=self.get_input_shape()[0])
diff --git a/tests/keras_tests/exporter_tests/keras_mctq/networks/dense_test.py b/tests/keras_tests/exporter_tests/keras_mctq/networks/dense_test.py
index dd1b2c198..3c2baabc4 100644
--- a/tests/keras_tests/exporter_tests/keras_mctq/networks/dense_test.py
+++ b/tests/keras_tests/exporter_tests/keras_mctq/networks/dense_test.py
@@ -26,7 +26,7 @@
 import numpy as np
 import tensorflow as tf
 
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.keras_tests.exporter_tests.keras_fake_quant.keras_fake_quant_exporter_base_test import \
     KerasFakeQuantExporterBaseTest, get_minmax_from_qparams
@@ -38,9 +38,9 @@ def get_input_shape(self):
         return [(2, 2, 50)]
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 2,
+        tp = generate_test_tpc({'weights_n_bits': 2,
                                      'activation_n_bits': 2})
-        return generate_keras_tpc(name="test_conv2d_2bit_fq_weight", tp_model=tp)
+        return generate_keras_tpc(name="test_conv2d_2bit_fq_weight", tpc=tp)
 
     def get_model(self):
         inputs = Input(shape=self.get_input_shape()[0])
diff --git a/tests/keras_tests/exporter_tests/keras_mctq/networks/dwconv2d_test.py b/tests/keras_tests/exporter_tests/keras_mctq/networks/dwconv2d_test.py
index d8b1aa5a1..289ad05d4 100644
--- a/tests/keras_tests/exporter_tests/keras_mctq/networks/dwconv2d_test.py
+++ b/tests/keras_tests/exporter_tests/keras_mctq/networks/dwconv2d_test.py
@@ -28,7 +28,7 @@
 import numpy as np
 import tensorflow as tf
 
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 
 class TestDWConv2DKerasMCTQExporter(TestKerasMCTQExport):
@@ -37,9 +37,9 @@ def get_input_shape(self):
         return [(30, 30, 3)]
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 2,
+        tp = generate_test_tpc({'weights_n_bits': 2,
                                      'activation_n_bits': 2})
-        return generate_keras_tpc(name="test_conv2d_2bit_fq_weight", tp_model=tp)
+        return generate_keras_tpc(name="test_conv2d_2bit_fq_weight", tpc=tp)
 
     def get_model(self):
         inputs = Input(shape=self.get_input_shape()[0])
diff --git a/tests/keras_tests/exporter_tests/keras_mctq/networks/multiple_inputs_test.py b/tests/keras_tests/exporter_tests/keras_mctq/networks/multiple_inputs_test.py
index 08d6ede61..30d1e6274 100644
--- a/tests/keras_tests/exporter_tests/keras_mctq/networks/multiple_inputs_test.py
+++ b/tests/keras_tests/exporter_tests/keras_mctq/networks/multiple_inputs_test.py
@@ -24,7 +24,7 @@
 else:
     from keras.layers import Conv2D, Add
 
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.keras_tests.exporter_tests.keras_fake_quant.keras_fake_quant_exporter_base_test import \
     KerasFakeQuantExporterBaseTest
@@ -36,8 +36,8 @@ def get_input_shape(self):
         return [(30, 30, 3), (28, 28, 3)]
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 2})
-        return generate_keras_tpc(name="test_conv2d_2bit_fq_weight", tp_model=tp)
+        tp = generate_test_tpc({'weights_n_bits': 2})
+        return generate_keras_tpc(name="test_conv2d_2bit_fq_weight", tpc=tp)
 
     def get_model(self):
         inputs1 = Input(shape=self.get_input_shape()[0])
diff --git a/tests/keras_tests/exporter_tests/keras_mctq/networks/no_quant_test.py b/tests/keras_tests/exporter_tests/keras_mctq/networks/no_quant_test.py
index 27d93d268..f7908affd 100644
--- a/tests/keras_tests/exporter_tests/keras_mctq/networks/no_quant_test.py
+++ b/tests/keras_tests/exporter_tests/keras_mctq/networks/no_quant_test.py
@@ -25,7 +25,7 @@
     from keras.layers import Conv2D
 import numpy as np
 
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.keras_tests.exporter_tests.keras_fake_quant.keras_fake_quant_exporter_base_test import \
     KerasFakeQuantExporterBaseTest
@@ -37,9 +37,9 @@ def get_input_shape(self):
         return [(30, 30, 3)]
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'enable_weights_quantization': False,
+        tp = generate_test_tpc({'enable_weights_quantization': False,
                                      'enable_activation_quantization': False})
-        return generate_keras_tpc(name="test_no_quant", tp_model=tp)
+        return generate_keras_tpc(name="test_no_quant", tpc=tp)
 
     def get_model(self):
         inputs = Input(shape=self.get_input_shape()[0])
diff --git a/tests/keras_tests/exporter_tests/tflite_fake_quant/networks/conv2d_test.py b/tests/keras_tests/exporter_tests/tflite_fake_quant/networks/conv2d_test.py
index 75abf4df6..7f5b411a5 100644
--- a/tests/keras_tests/exporter_tests/tflite_fake_quant/networks/conv2d_test.py
+++ b/tests/keras_tests/exporter_tests/tflite_fake_quant/networks/conv2d_test.py
@@ -22,7 +22,7 @@
 from tests.keras_tests.exporter_tests.tflite_fake_quant.tflite_fake_quant_exporter_base_test import \
     TFLiteFakeQuantExporterBaseTest
 
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 
 
@@ -32,8 +32,8 @@ def get_input_shape(self):
         return [(30, 30, 3)]
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 2})
-        return generate_keras_tpc(name="test_conv2d_2bit_fq_weight", tp_model=tp)
+        tp = generate_test_tpc({'weights_n_bits': 2})
+        return generate_keras_tpc(name="test_conv2d_2bit_fq_weight", tpc=tp)
 
     def get_model(self):
         inputs = Input(shape=self.get_input_shape()[0])
@@ -68,8 +68,8 @@ def get_input_shape(self):
         return [(30, 30, 3)]
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 2})
-        return generate_keras_tpc(name="test_conv2d_2bit_reused_weight", tp_model=tp)
+        tp = generate_test_tpc({'weights_n_bits': 2})
+        return generate_keras_tpc(name="test_conv2d_2bit_reused_weight", tpc=tp)
 
     def get_model(self):
         conv = Conv2D(3,3)
diff --git a/tests/keras_tests/exporter_tests/tflite_fake_quant/networks/dense_test.py b/tests/keras_tests/exporter_tests/tflite_fake_quant/networks/dense_test.py
index 39de62468..de0957666 100644
--- a/tests/keras_tests/exporter_tests/tflite_fake_quant/networks/dense_test.py
+++ b/tests/keras_tests/exporter_tests/tflite_fake_quant/networks/dense_test.py
@@ -22,7 +22,7 @@
 from tests.keras_tests.exporter_tests.tflite_fake_quant.tflite_fake_quant_exporter_base_test import \
     TFLiteFakeQuantExporterBaseTest
 
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 
 
@@ -32,8 +32,8 @@ def get_input_shape(self):
         return [(3, 3, 3)]
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 2})
-        return generate_keras_tpc(name="test_dense_2bit_reused_weight", tp_model=tp)
+        tp = generate_test_tpc({'weights_n_bits': 2})
+        return generate_keras_tpc(name="test_dense_2bit_reused_weight", tpc=tp)
 
     def get_model(self):
         dense = Dense(27)
diff --git a/tests/keras_tests/exporter_tests/tflite_fake_quant/tflite_fake_quant_exporter_base_test.py b/tests/keras_tests/exporter_tests/tflite_fake_quant/tflite_fake_quant_exporter_base_test.py
index 887a14452..73df77df3 100644
--- a/tests/keras_tests/exporter_tests/tflite_fake_quant/tflite_fake_quant_exporter_base_test.py
+++ b/tests/keras_tests/exporter_tests/tflite_fake_quant/tflite_fake_quant_exporter_base_test.py
@@ -21,7 +21,7 @@
 import model_compression_toolkit as mct
 from model_compression_toolkit import get_target_platform_capabilities
 from model_compression_toolkit.constants import TENSORFLOW
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
 import tests.keras_tests.exporter_tests.constants as constants
 from model_compression_toolkit.exporter.model_exporter.keras.base_keras_exporter import DEFAULT_KERAS_EXPORT_EXTENTION
 
@@ -68,7 +68,7 @@ def get_input_shape(self):
         return [(16, 16, 3)]
 
     def get_tpc(self):
-        return get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
+        return get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
 
     def __get_repr_dataset(self):
         yield [np.random.randn(*((1,) + shape)) for shape in self.get_input_shape()]
diff --git a/tests/keras_tests/exporter_tests/tflite_int8/imx500_int8_tp_model.py b/tests/keras_tests/exporter_tests/tflite_int8/imx500_int8_tpc.py
similarity index 89%
rename from tests/keras_tests/exporter_tests/tflite_int8/imx500_int8_tp_model.py
rename to tests/keras_tests/exporter_tests/tflite_int8/imx500_int8_tpc.py
index 83e346971..bdac7a0c0 100644
--- a/tests/keras_tests/exporter_tests/tflite_int8/imx500_int8_tp_model.py
+++ b/tests/keras_tests/exporter_tests/tflite_int8/imx500_int8_tpc.py
@@ -21,7 +21,7 @@
 from model_compression_toolkit.defaultdict import DefaultDict
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, KERAS_KERNEL, BIAS_ATTR, BIAS, \
     KERAS_DEPTHWISE_KERNEL, WEIGHTS_N_BITS
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_op_qc, generate_test_attr_configs
+from tests.common_tests.helpers.generate_test_tpc import generate_test_op_qc, generate_test_attr_configs
 
 if version.parse(tf.__version__) >= version.parse("2.13"):
     from keras.src.layers import Conv2D, DepthwiseConv2D, Dense, Reshape, ZeroPadding2D, Dropout, \
@@ -34,22 +34,22 @@
 
 import model_compression_toolkit as mct
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, OpQuantizationConfig
-from tests.common_tests.helpers.tpcs_for_tests.v1.tp_model import generate_tp_model
+from tests.common_tests.helpers.tpcs_for_tests.v1.tpc import generate_tpc
 
 tp = mct.target_platform
 
 
-def get_tp_model(edit_weights_params_dict, edit_act_params_dict) -> TargetPlatformCapabilities:
+def get_tpc(edit_weights_params_dict, edit_act_params_dict) -> TargetPlatformCapabilities:
     base_config, mixed_precision_cfg_list, default_config = get_op_quantization_configs()
 
     updated_config = base_config.clone_and_edit(attr_to_edit={KERNEL_ATTR: edit_weights_params_dict},
                                                 **edit_act_params_dict)
     op_cfg_list = [updated_config]
 
-    return generate_tp_model(default_config=updated_config,
-                             base_config=updated_config,
-                             mixed_precision_cfg_list=op_cfg_list,
-                             name='int8_tp_model')
+    return generate_tpc(default_config=updated_config,
+                        base_config=updated_config,
+                        mixed_precision_cfg_list=op_cfg_list,
+                        name='int8_tpc')
 
 
 def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantizationConfig], OpQuantizationConfig]:
@@ -64,12 +64,12 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
 
 def get_int8_tpc(edit_weights_params_dict={}, edit_act_params_dict={}) -> tp.TargetPlatformCapabilities:
-    default_tp_model = get_tp_model(edit_weights_params_dict, edit_act_params_dict)
-    return default_tp_model
+    default_tpc = get_tpc(edit_weights_params_dict, edit_act_params_dict)
+    return default_tpc
 
 
-def generate_keras_tpc(name: str, tp_model: schema.TargetPlatformCapabilities):
-    keras_tpc = tp.FrameworkQuantizationCapabilities(tp_model)
+def generate_keras_tpc(name: str, tpc: schema.TargetPlatformCapabilities):
+    keras_tpc = tp.FrameworkQuantizationCapabilities(tpc)
 
     with keras_tpc:
         tp.OperationsSetToLayers("NoQuantization", [Reshape,
diff --git a/tests/keras_tests/exporter_tests/tflite_int8/networks/conv2d_test.py b/tests/keras_tests/exporter_tests/tflite_int8/networks/conv2d_test.py
index 318149786..918314119 100644
--- a/tests/keras_tests/exporter_tests/tflite_int8/networks/conv2d_test.py
+++ b/tests/keras_tests/exporter_tests/tflite_int8/networks/conv2d_test.py
@@ -18,7 +18,7 @@
 import tests.keras_tests.exporter_tests.constants as constants
 from model_compression_toolkit.core.keras.constants import KERNEL
 from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
-from tests.keras_tests.exporter_tests.tflite_int8.imx500_int8_tp_model import get_int8_tpc
+from tests.keras_tests.exporter_tests.tflite_int8.imx500_int8_tpc import get_int8_tpc
 from tests.keras_tests.exporter_tests.tflite_int8.tflite_int8_exporter_base_test import TFLiteINT8ExporterBaseTest
 from tests.keras_tests.utils import get_layers_from_model_by_type
 
diff --git a/tests/keras_tests/exporter_tests/tflite_int8/networks/mobilenetv2_test.py b/tests/keras_tests/exporter_tests/tflite_int8/networks/mobilenetv2_test.py
index fe1118508..c425ce3f9 100644
--- a/tests/keras_tests/exporter_tests/tflite_int8/networks/mobilenetv2_test.py
+++ b/tests/keras_tests/exporter_tests/tflite_int8/networks/mobilenetv2_test.py
@@ -19,7 +19,7 @@
 
 import tests.keras_tests.exporter_tests.constants as constants
 from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
-from tests.keras_tests.exporter_tests.tflite_int8.imx500_int8_tp_model import get_int8_tpc
+from tests.keras_tests.exporter_tests.tflite_int8.imx500_int8_tpc import get_int8_tpc
 from tests.keras_tests.exporter_tests.tflite_int8.tflite_int8_exporter_base_test import TFLiteINT8ExporterBaseTest
 
 layers = keras.layers
diff --git a/tests/keras_tests/exporter_tests/tflite_int8/tflite_int8_exporter_base_test.py b/tests/keras_tests/exporter_tests/tflite_int8/tflite_int8_exporter_base_test.py
index f148b2fbd..0398f7b6b 100644
--- a/tests/keras_tests/exporter_tests/tflite_int8/tflite_int8_exporter_base_test.py
+++ b/tests/keras_tests/exporter_tests/tflite_int8/tflite_int8_exporter_base_test.py
@@ -24,7 +24,7 @@
 
 import model_compression_toolkit as mct
 import tests.keras_tests.exporter_tests.constants as constants
-from tests.keras_tests.exporter_tests.tflite_int8.imx500_int8_tp_model import get_int8_tpc
+from tests.keras_tests.exporter_tests.tflite_int8.imx500_int8_tpc import get_int8_tpc
 
 
 class TFLiteINT8ExporterBaseTest:
diff --git a/tests/keras_tests/feature_networks_tests/base_keras_feature_test.py b/tests/keras_tests/feature_networks_tests/base_keras_feature_test.py
index 9e9b8aa6a..a3c611a35 100644
--- a/tests/keras_tests/feature_networks_tests/base_keras_feature_test.py
+++ b/tests/keras_tests/feature_networks_tests/base_keras_feature_test.py
@@ -14,7 +14,7 @@
 # ==============================================================================
 from model_compression_toolkit.constants import TENSORFLOW
 from model_compression_toolkit.core.common.framework_implementation import FrameworkImplementation
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
 from model_compression_toolkit.gptq import keras_gradient_post_training_quantization
 from model_compression_toolkit.core import FrameworkInfo
@@ -39,7 +39,7 @@ def __init__(self,
                          input_shape=input_shape)
 
     def get_tpc(self):
-        return get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
+        return get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
 
     def get_ptq_facade(self):
         return keras_post_training_quantization
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/activation_16bit_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/activation_16bit_test.py
index 1e9a44c8c..05bcd08dc 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/activation_16bit_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/activation_16bit_test.py
@@ -18,14 +18,14 @@
 import model_compression_toolkit as mct
 from model_compression_toolkit.constants import TENSORFLOW
 from model_compression_toolkit.core import MixedPrecisionQuantizationConfig
-from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TPC
 from mct_quantizers.keras.activation_quantization_holder import KerasActivationQuantizationHolder
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import OperatorSetNames, \
     QuantizationConfigOptions
 from model_compression_toolkit.target_platform_capabilities.schema.schema_functions import \
     get_config_options_by_operators_set
-from tests.common_tests.helpers.generate_test_tp_model import generate_custom_test_tp_model
-from tests.common_tests.helpers.tpcs_for_tests.v4.tp_model import get_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_custom_test_tpc
+from tests.common_tests.helpers.tpcs_for_tests.v4.tpc import get_tpc
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 from tests.keras_tests.utils import get_layers_from_model_by_type
 
@@ -39,17 +39,17 @@
 class Activation16BitTest(BaseKerasFeatureNetworkTest):
 
     def get_tpc(self):
-        tpc = get_tp_model()
+        tpc = get_tpc()
         base_cfg_16 = [c for c in get_config_options_by_operators_set(tpc,
                                                                       OperatorSetNames.MUL).quantization_configurations
                        if c.activation_n_bits == 16][0].clone_and_edit()
         qco_16 = QuantizationConfigOptions(base_config=base_cfg_16,
                                            quantization_configurations=(tpc.default_qco.base_config,
                                                                         base_cfg_16))
-        tpc = generate_custom_test_tp_model(
+        tpc = generate_custom_test_tpc(
             name="custom_16_bit_tpc",
             base_cfg=tpc.default_qco.base_config,
-            base_tp_model=tpc,
+            base_tpc=tpc,
             operator_sets_dict={
                 OperatorSetNames.MUL: qco_16,
             })
@@ -83,7 +83,7 @@ def compare(self, quantized_model, float_model, input_x=None, quantization_info=
 class Activation16BitMixedPrecisionTest(Activation16BitTest):
 
     def get_tpc(self):
-        tpc = get_tp_model()
+        tpc = get_tpc()
 
         mul_qco = get_config_options_by_operators_set(tpc, OperatorSetNames.MUL)
         base_cfg_16 = [l for l in mul_qco.quantization_configurations if l.activation_n_bits == 16][0]
@@ -95,10 +95,10 @@ def get_tpc(self):
         qco_16 = QuantizationConfigOptions(base_config=base_cfg_16,
                                            quantization_configurations=quantization_configurations)
 
-        tpc = generate_custom_test_tp_model(
+        tpc = generate_custom_test_tpc(
             name="custom_16_bit_tpc",
             base_cfg=tpc.default_qco.base_config,
-            base_tp_model=tpc,
+            base_tpc=tpc,
             operator_sets_dict={
                 OperatorSetNames.MUL: qco_16,
             })
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/bn_attributes_quantization_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/bn_attributes_quantization_test.py
index 78ddc0846..f342b1a49 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/bn_attributes_quantization_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/bn_attributes_quantization_test.py
@@ -23,7 +23,7 @@
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import Signedness
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, KERAS_KERNEL, BIAS, BIAS_ATTR
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_attr_configs, \
+from tests.common_tests.helpers.generate_test_tpc import generate_test_attr_configs, \
     DEFAULT_WEIGHT_ATTR_CONFIG, KERNEL_BASE_CONFIG, generate_test_op_qc, BIAS_CONFIG
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 from tests.keras_tests.utils import get_layers_from_model_by_type
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/bn_folding_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/bn_folding_test.py
index bf51d0bfb..616a2d4ea 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/bn_folding_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/bn_folding_test.py
@@ -20,7 +20,7 @@
 import model_compression_toolkit as mct
 import tensorflow as tf
 
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 import numpy as np
 from tests.common_tests.helpers.tensors_compare import cosine_similarity, normalized_mse
@@ -59,11 +59,11 @@ def __init__(self, unit_test, linear_layer):
         super(BaseBatchNormalizationFolding, self).__init__(unit_test=unit_test)
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 16,
+        tp = generate_test_tpc({'weights_n_bits': 16,
                                      'activation_n_bits': 16,
                                      'enable_weights_quantization': False,
                                      'enable_activation_quantization': False})
-        return generate_keras_tpc(name="bn_folding_test", tp_model=tp)
+        return generate_keras_tpc(name="bn_folding_test", tpc=tp)
 
     def get_quantization_config(self):
         return mct.core.QuantizationConfig(mct.core.QuantizationErrorMethod.NOCLIPPING,
@@ -264,11 +264,11 @@ def create_networks(self):
         return tf.keras.models.Model(inputs=inputs, outputs=x)
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 16,
+        tp = generate_test_tpc({'weights_n_bits': 16,
                                      'activation_n_bits': 16,
                                      'enable_weights_quantization': False,
                                      'enable_activation_quantization': False})
-        return generate_keras_tpc(name="bn_folding_test", tp_model=tp)
+        return generate_keras_tpc(name="bn_folding_test", tpc=tp)
 
     def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
         if self.is_dwconv:
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/compute_max_cut_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/compute_max_cut_test.py
index d256ca67c..579d9c2de 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/compute_max_cut_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/compute_max_cut_test.py
@@ -18,8 +18,8 @@
 
 from mct_quantizers.keras.metadata import get_metadata
 from model_compression_toolkit.constants import TENSORFLOW
-from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TP_MODEL
-from tests.common_tests.helpers.tpcs_for_tests.v2.tp_model import get_tp_model
+from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TPC
+from tests.common_tests.helpers.tpcs_for_tests.v2.tpc import get_tpc
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 
 keras = tf.keras
@@ -42,7 +42,7 @@ def create_networks(self):
         return keras.Model(inputs=inputs, outputs=outputs)
 
     def get_tpc(self):
-        return get_tp_model()
+        return get_tpc()
 
     def get_debug_config(self):
         return mct.core.DebugConfig(simulate_scheduler=True)
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/const_quantization_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/const_quantization_test.py
index 128fa0a9d..d63d97350 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/const_quantization_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/const_quantization_test.py
@@ -20,10 +20,10 @@
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.core import MixedPrecisionQuantizationConfig
 
-from tests.common_tests.helpers.generate_test_tp_model import generate_custom_test_tp_model
-from tests.common_tests.helpers.tpcs_for_tests.v3.tp_model import get_tp_model as get_tp_v3
-from tests.common_tests.helpers.tpcs_for_tests.v4.tp_model import get_tp_model as get_tp_v4
-from tests.common_tests.helpers.tpcs_for_tests.v4.tp_model import generate_tp_model, get_op_quantization_configs
+from tests.common_tests.helpers.generate_test_tpc import generate_custom_test_tpc
+from tests.common_tests.helpers.tpcs_for_tests.v3.tpc import get_tpc as get_tp_v3
+from tests.common_tests.helpers.tpcs_for_tests.v4.tpc import get_tpc as get_tp_v4
+from tests.common_tests.helpers.tpcs_for_tests.v4.tpc import generate_tpc, get_op_quantization_configs
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 from tests.common_tests.helpers.tensors_compare import cosine_similarity
 from tests.keras_tests.utils import get_layers_from_model_by_type
@@ -38,10 +38,10 @@
 def create_const_quant_tpc(qmethod):
     name = "const_quant_tpc"
     base_cfg, mp_op_cfg_list, default_cfg = get_op_quantization_configs()
-    base_tp_model = generate_tp_model(default_config=default_cfg,
-                                      base_config=base_cfg,
-                                      mixed_precision_cfg_list=mp_op_cfg_list,
-                                      name=name)
+    base_tpc = generate_tpc(default_config=default_cfg,
+                                 base_config=base_cfg,
+                                 mixed_precision_cfg_list=mp_op_cfg_list,
+                                 name=name)
 
     const_config = default_cfg.clone_and_edit(
         default_weight_attr_config=default_cfg.default_weight_attr_config.clone_and_edit(
@@ -61,12 +61,12 @@ def create_const_quant_tpc(qmethod):
     operator_sets_dict[schema.OperatorSetNames.STACK] = const_merge_configuration_options
     operator_sets_dict[schema.OperatorSetNames.CONCATENATE] = const_merge_configuration_options
 
-    tp_model = generate_custom_test_tp_model(name=name,
-                                             base_cfg=base_cfg,
-                                             base_tp_model=base_tp_model,
-                                             operator_sets_dict=operator_sets_dict)
+    tpc = generate_custom_test_tpc(name=name,
+                                        base_cfg=base_cfg,
+                                        base_tpc=base_tpc,
+                                        operator_sets_dict=operator_sets_dict)
 
-    return tp_model
+    return tpc
 
 
 class ConstQuantizationTest(BaseKerasFeatureNetworkTest):
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/const_representation_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/const_representation_test.py
index 11754f504..ab497f630 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/const_representation_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/const_representation_test.py
@@ -18,8 +18,8 @@
 import model_compression_toolkit as mct
 from model_compression_toolkit import get_target_platform_capabilities
 from model_compression_toolkit.constants import TENSORFLOW
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 from tests.common_tests.helpers.tensors_compare import cosine_similarity
@@ -45,11 +45,11 @@ def generate_inputs(self):
         return [1 + np.random.random(in_shape) for in_shape in self.get_input_shapes()]
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 16,
+        tp = generate_test_tpc({'weights_n_bits': 16,
                                      'activation_n_bits': 16,
                                      'enable_weights_quantization': False,
                                      'enable_activation_quantization': False})
-        return generate_keras_tpc(name="const_representation_test", tp_model=tp)
+        return generate_keras_tpc(name="const_representation_test", tpc=tp)
 
     def create_networks(self):
         inputs = layers.Input(shape=self.get_input_shapes()[0][1:])
@@ -90,7 +90,7 @@ def generate_inputs(self):
         return [1 + np.random.random(in_shape) for in_shape in self.get_input_shapes()]
 
     def get_tpc(self):
-        return get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
+        return get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
 
     def create_networks(self):
         inputs = layers.Input(shape=self.get_input_shapes()[0][1:])
@@ -110,11 +110,11 @@ def __init__(self, unit_test, input_shape=(32, 32, 16)):
         super(ConstRepresentationMatMulTest, self).__init__(unit_test=unit_test, input_shape=input_shape)
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 16,
+        tp = generate_test_tpc({'weights_n_bits': 16,
                                      'activation_n_bits': 16,
                                      'enable_weights_quantization': False,
                                      'enable_activation_quantization': False})
-        return generate_keras_tpc(name="const_representation_test", tp_model=tp)
+        return generate_keras_tpc(name="const_representation_test", tpc=tp)
 
     def create_networks(self):
         inputs = layers.Input(shape=self.get_input_shapes()[0][1:])
@@ -146,11 +146,11 @@ def __init__(self, unit_test, input_shape=(32, 32, 16)):
         super(ConstRepresentationMultiInputTest, self).__init__(unit_test=unit_test, input_shape=input_shape)
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 16,
+        tp = generate_test_tpc({'weights_n_bits': 16,
                                      'activation_n_bits': 16,
                                      'enable_weights_quantization': False,
                                      'enable_activation_quantization': False})
-        return generate_keras_tpc(name="const_representation_test", tp_model=tp)
+        return generate_keras_tpc(name="const_representation_test", tpc=tp)
 
     def create_networks(self):
         as_const = lambda v: np.random.random(v.shape.as_list()).astype(np.float32)
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/conv_func_substitutions_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/conv_func_substitutions_test.py
index 381d330cb..7c8ad6967 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/conv_func_substitutions_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/conv_func_substitutions_test.py
@@ -23,9 +23,9 @@
 
 import model_compression_toolkit as mct
 from model_compression_toolkit.constants import TENSORFLOW
-from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TPC
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.common_tests.helpers.tensors_compare import cosine_similarity
 from tests.keras_tests.utils import get_layers_from_model_by_type
@@ -43,9 +43,9 @@ def __init__(self, unit_test):
         super().__init__(unit_test, input_shape=(32, 32, 3))
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'enable_weights_quantization': False,
+        tp = generate_test_tpc({'enable_weights_quantization': False,
                                      'enable_activation_quantization': False})
-        return generate_keras_tpc(name="test_no_quant", tp_model=tp)
+        return generate_keras_tpc(name="test_no_quant", tpc=tp)
 
     def create_networks(self):
         _in = tf.keras.layers.Input(self.input_shape[1:])
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/linear_collapsing_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/linear_collapsing_test.py
index cae35085f..70d5e18ae 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/linear_collapsing_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/linear_collapsing_test.py
@@ -23,7 +23,7 @@
     from keras.layers.core import TFOpLambda
 
 from model_compression_toolkit.trainable_infrastructure import KerasTrainableQuantizationWrapper
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 import numpy as np
@@ -40,11 +40,11 @@ def __init__(self, unit_test):
         super(BaseConv2DCollapsingTest, self).__init__(unit_test=unit_test, input_shape=(32,32,16))
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 32,
+        tp = generate_test_tpc({'weights_n_bits': 32,
                                      'activation_n_bits': 32,
                                      'enable_weights_quantization': False,
                                      'enable_activation_quantization': False})
-        return generate_keras_tpc(name="linear_collapsing_test", tp_model=tp)
+        return generate_keras_tpc(name="linear_collapsing_test", tpc=tp)
 
     def get_quantization_config(self):
         return mct.core.QuantizationConfig(mct.core.QuantizationErrorMethod.NOCLIPPING,
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/lut_quantizer.py b/tests/keras_tests/feature_networks_tests/feature_networks/lut_quantizer.py
index 33f9a42bf..03c27db77 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/lut_quantizer.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/lut_quantizer.py
@@ -26,8 +26,8 @@
 from model_compression_toolkit.core.keras.constants import KERNEL
 from mct_quantizers.keras.quantizers import ActivationLutPOTInferableQuantizer
 from mct_quantizers.common.constants import THRESHOLD, LUT_VALUES
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_attr_configs, generate_test_op_qc, \
-    generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_attr_configs, generate_test_op_qc, \
+    generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 from tests.keras_tests.utils import get_layers_from_model_by_type
@@ -60,9 +60,9 @@ def __init__(self, unit_test, weights_n_bits: int = 3, is_symmetric=False):
 
     def get_tpc(self):
         qmethod = tp.QuantizationMethod.LUT_SYM_QUANTIZER if self.is_symmetric else tp.QuantizationMethod.LUT_POT_QUANTIZER
-        tp_model = generate_test_tp_model({'weights_n_bits': self.weights_n_bits,
+        tpc = generate_test_tpc({'weights_n_bits': self.weights_n_bits,
                                            'weights_quantization_method': qmethod})
-        return generate_keras_tpc(name='lut_quantizer_test', tp_model=tp_model)
+        return generate_keras_tpc(name='lut_quantizer_test', tpc=tpc)
 
     def get_debug_config(self):
         return mct.core.DebugConfig(
@@ -105,9 +105,9 @@ def __init__(self, unit_test, activation_n_bits: int = 3):
         super().__init__(unit_test, num_calibration_iter=5, val_batch_size=32)
 
     def get_tpc(self):
-        tp_model = generate_test_tp_model({'activation_quantization_method': tp.QuantizationMethod.LUT_POT_QUANTIZER,
+        tpc = generate_test_tpc({'activation_quantization_method': tp.QuantizationMethod.LUT_POT_QUANTIZER,
                                            'activation_n_bits': self.activation_n_bits})
-        return generate_keras_tpc(name='lut_quantizer_test', tp_model=tp_model)
+        return generate_keras_tpc(name='lut_quantizer_test', tpc=tpc)
 
     def get_input_shapes(self):
         return [[self.val_batch_size, 16, 16, self.num_conv_channels]]
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/manual_bit_selection.py b/tests/keras_tests/feature_networks_tests/feature_networks/manual_bit_selection.py
index eb83f903a..7ac361a64 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/manual_bit_selection.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/manual_bit_selection.py
@@ -23,9 +23,9 @@
 from model_compression_toolkit.target_platform_capabilities.schema.schema_functions import \
     get_config_options_by_operators_set
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_op_qc, generate_test_attr_configs, \
-    generate_custom_test_tp_model
-from tests.common_tests.helpers.tpcs_for_tests.v3.tp_model import get_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_op_qc, generate_test_attr_configs, \
+    generate_custom_test_tpc
+from tests.common_tests.helpers.tpcs_for_tests.v3.tpc import get_tpc
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 from tests.keras_tests.tpc_keras import get_tpc_with_activation_mp_keras
 
@@ -136,17 +136,17 @@ class Manual16BitWidthSelectionTest(ManualBitWidthSelectionTest):
     Uses the manual bit width API in the "get_core_configs" method.
     """
     def get_tpc(self):
-        tpc = get_tp_model()
+        tpc = get_tpc()
         base_cfg_16 = [c for c in get_config_options_by_operators_set(tpc,
                                                                       OperatorSetNames.MUL).quantization_configurations
                        if c.activation_n_bits == 16][0].clone_and_edit()
         qco_16 = QuantizationConfigOptions(base_config=base_cfg_16,
                                            quantization_configurations=(tpc.default_qco.base_config,
                                                                         base_cfg_16))
-        tpc = generate_custom_test_tp_model(
+        tpc = generate_custom_test_tpc(
             name="custom_16_bit_tpc",
             base_cfg=tpc.default_qco.base_config,
-            base_tp_model=tpc,
+            base_tpc=tpc,
             operator_sets_dict={
                 OperatorSetNames.MUL: qco_16,
                 OperatorSetNames.GELU: qco_16,
@@ -174,7 +174,7 @@ class Manual16BitWidthSelectionMixedPrecisionTest(Manual16BitWidthSelectionTest)
     Uses the manual bit width API in the "get_core_configs" method.
     """
     def get_tpc(self):
-        tpc = get_tp_model()
+        tpc = get_tpc()
 
         mul_qco = get_config_options_by_operators_set(tpc, OperatorSetNames.MUL)
         base_cfg_16 = [l for l in mul_qco.quantization_configurations if l.activation_n_bits == 16][0]
@@ -186,10 +186,10 @@ def get_tpc(self):
         qco_16 = QuantizationConfigOptions(base_config=base_cfg_16,
                                            quantization_configurations=quantization_configurations)
 
-        tpc = generate_custom_test_tp_model(
+        tpc = generate_custom_test_tpc(
             name="custom_16_bit_tpc",
             base_cfg=tpc.default_qco.base_config,
-            base_tp_model=tpc,
+            base_tpc=tpc,
             operator_sets_dict={
                 OperatorSetNames.MUL: qco_16,
             })
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/matmul_substitution_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/matmul_substitution_test.py
index d6da8a023..bebfec1d0 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/matmul_substitution_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/matmul_substitution_test.py
@@ -24,7 +24,7 @@
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 import model_compression_toolkit as mct
 
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 import numpy as np
 from tests.common_tests.helpers.tensors_compare import cosine_similarity
@@ -35,11 +35,11 @@ def __init__(self, unit_test):
         super().__init__(unit_test, input_shape=(8,))
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 16,
+        tp = generate_test_tpc({'weights_n_bits': 16,
                                      'activation_n_bits': 16,
                                      'enable_weights_quantization': False,
                                      'enable_activation_quantization': False})
-        return generate_keras_tpc(name="no_quantization", tp_model=tp)
+        return generate_keras_tpc(name="no_quantization", tpc=tp)
 
     def get_quantization_config(self):
         return mct.core.QuantizationConfig(mct.core.QuantizationErrorMethod.NOCLIPPING,
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/metadata_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/metadata_test.py
index 4fc584055..bf3e317a2 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/metadata_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/metadata_test.py
@@ -17,12 +17,12 @@
 import numpy as np
 
 import model_compression_toolkit as mct
-from tests.common_tests.helpers.tpcs_for_tests.v2.tp_model import get_tp_model
+from tests.common_tests.helpers.tpcs_for_tests.v2.tpc import get_tpc
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 from mct_quantizers.keras.metadata import add_metadata, get_metadata
 
 from model_compression_toolkit.constants import TENSORFLOW
-from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TPC
 
 keras = tf.keras
 layers = keras.layers
@@ -32,7 +32,7 @@
 class MetadataTest(BaseKerasFeatureNetworkTest):
 
     def get_tpc(self):
-        return get_tp_model()
+        return get_tpc()
 
     def create_networks(self):
         inputs = layers.Input(shape=self.get_input_shapes()[0][1:])
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision/requires_mixed_precision_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision/requires_mixed_precision_test.py
index f0bc7f6ed..75e9c5516 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision/requires_mixed_precision_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision/requires_mixed_precision_test.py
@@ -38,7 +38,7 @@
 from tests.keras_tests.feature_networks_tests.feature_networks.weights_mixed_precision_tests import \
     MixedPrecisionBaseTest
 from tests.keras_tests.tpc_keras import get_tpc_with_activation_mp_keras, get_weights_only_mp_tpc_keras
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_op_qc, generate_test_attr_configs
+from tests.common_tests.helpers.generate_test_tpc import generate_test_op_qc, generate_test_attr_configs
 import model_compression_toolkit as mct
 
 
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision_tests.py b/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision_tests.py
index 4b7c41b0e..8c4024011 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision_tests.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision_tests.py
@@ -25,8 +25,8 @@
 from model_compression_toolkit.core.keras.constants import SIGMOID, SOFTMAX, BIAS
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, KERAS_KERNEL
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_op_qc, generate_test_attr_configs
-from tests.keras_tests.exporter_tests.tflite_int8.imx500_int8_tp_model import get_op_quantization_configs
+from tests.common_tests.helpers.generate_test_tpc import generate_test_op_qc, generate_test_attr_configs
+from tests.keras_tests.exporter_tests.tflite_int8.imx500_int8_tpc import get_op_quantization_configs
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 from keras import backend as K
 
@@ -676,7 +676,7 @@ def get_tpc(self):
             base_config=cfg,
         )
 
-        tp_model = schema.TargetPlatformCapabilities(
+        tpc = schema.TargetPlatformCapabilities(
             default_qco=schema.QuantizationConfigOptions(quantization_configurations=tuple([cfg]), base_config=cfg),
             tpc_minor_version=None,
             tpc_patch_version=None,
@@ -686,7 +686,7 @@ def get_tpc(self):
             add_metadata=False,
             name="mp_activation_conf_weights_test")
 
-        return tp_model
+        return tpc
 
     def get_resource_utilization(self):
         return ResourceUtilization(np.inf, 5407)
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/node_filter_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/node_filter_test.py
index 8d1c8d983..4e11c29d0 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/node_filter_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/node_filter_test.py
@@ -25,7 +25,7 @@
     get_weights_quantization_params_fn, get_activation_quantization_params_fn
 from model_compression_toolkit.core.keras.constants import KERNEL
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 from tests.keras_tests.utils import get_layers_from_model_by_type
 
@@ -57,11 +57,11 @@ def __init__(self, unit_test, activation_n_bits: int = 3, weights_n_bits: int =
         super().__init__(unit_test )
 
     def get_tpc(self):
-        tp_model = generate_test_tp_model({
+        tpc = generate_test_tpc({
             'weights_quantization_method': tp.QuantizationMethod.POWER_OF_TWO,
             'activation_n_bits': 16,
             'weights_n_bits': 16})
-        return generate_keras_tpc(name="scope_filter_test", tp_model=tp_model)
+        return generate_keras_tpc(name="scope_filter_test", tpc=tpc)
 
     def get_quantization_config(self):
         return mct.core.QuantizationConfig(mct.core.QuantizationErrorMethod.MSE, mct.core.QuantizationErrorMethod.MSE,
@@ -139,11 +139,11 @@ def __init__(self, unit_test, activation_n_bits: int = 3, weights_n_bits: int =
         super().__init__(unit_test )
 
     def get_tpc(self):
-        tp_model = generate_test_tp_model({
+        tpc = generate_test_tpc({
             'weights_quantization_method': tp.QuantizationMethod.POWER_OF_TWO,
             'activation_n_bits': 16,
             'weights_n_bits': 16})
-        return generate_keras_tpc(name="name_filter_test", tp_model=tp_model)
+        return generate_keras_tpc(name="name_filter_test", tpc=tpc)
 
     def get_quantization_config(self):
         return mct.core.QuantizationConfig(mct.core.QuantizationErrorMethod.MSE, mct.core.QuantizationErrorMethod.MSE,
@@ -211,11 +211,11 @@ def activations_params_fn(self):
         return get_activation_quantization_params_fn(tp.QuantizationMethod.POWER_OF_TWO)
 
     def get_tpc(self):
-        tp_model = generate_test_tp_model({
+        tpc = generate_test_tpc({
             'weights_quantization_method': tp.QuantizationMethod.POWER_OF_TWO,
             'activation_n_bits': 16,
             'weights_n_bits': 16})
-        return generate_keras_tpc(name="type_filter_test", tp_model=tp_model)
+        return generate_keras_tpc(name="type_filter_test", tpc=tpc)
 
     def get_quantization_config(self):
         return mct.core.QuantizationConfig(mct.core.QuantizationErrorMethod.MSE, mct.core.QuantizationErrorMethod.MSE,
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/per_tensor_weight_quantization_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/per_tensor_weight_quantization_test.py
index 2ede4b748..801626f5a 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/per_tensor_weight_quantization_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/per_tensor_weight_quantization_test.py
@@ -18,7 +18,7 @@
 
 from model_compression_toolkit.constants import THRESHOLD
 from model_compression_toolkit.core.keras.constants import KERNEL
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.keras_tests.utils import get_layers_from_model_by_type
@@ -32,8 +32,8 @@ def __init__(self, unit_test):
         super().__init__(unit_test )
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_per_channel_threshold': False})
-        return generate_keras_tpc(name="per_tensor_weight_quantization", tp_model=tp)
+        tp = generate_test_tpc({'weights_per_channel_threshold': False})
+        return generate_keras_tpc(name="per_tensor_weight_quantization", tpc=tp)
 
     def create_networks(self):
         inputs = layers.Input(shape=self.get_input_shapes()[0][1:])
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/residual_collapsing_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/residual_collapsing_test.py
index 6217407a9..150511b39 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/residual_collapsing_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/residual_collapsing_test.py
@@ -14,7 +14,7 @@
 # ==============================================================================
 import model_compression_toolkit as mct
 import tensorflow as tf
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 import numpy as np
@@ -32,11 +32,11 @@ def __init__(self, unit_test):
                                                          input_shape=(16,16,3))
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 32,
+        tp = generate_test_tpc({'weights_n_bits': 32,
                                      'activation_n_bits': 32,
                                      'enable_weights_quantization': False,
                                      'enable_activation_quantization': False})
-        return generate_keras_tpc(name="linear_collapsing_test", tp_model=tp)
+        return generate_keras_tpc(name="linear_collapsing_test", tpc=tp)
 
     def get_quantization_config(self):
         return mct.core.QuantizationConfig(mct.core.QuantizationErrorMethod.NOCLIPPING,
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/reused_layer_mixed_precision_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/reused_layer_mixed_precision_test.py
index e5e648b8d..c5bd2b6fa 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/reused_layer_mixed_precision_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/reused_layer_mixed_precision_test.py
@@ -19,7 +19,7 @@
 import model_compression_toolkit as mct
 import tensorflow as tf
 
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_op_qc, generate_test_attr_configs
+from tests.common_tests.helpers.generate_test_tpc import generate_test_op_qc, generate_test_attr_configs
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 import numpy as np
 
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/second_moment_correction_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/second_moment_correction_test.py
index 8885ff795..d5b2debfc 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/second_moment_correction_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/second_moment_correction_test.py
@@ -35,18 +35,18 @@
 from model_compression_toolkit.core.keras.statistics_correction.apply_second_moment_correction import \
     keras_apply_second_moment_correction
 from model_compression_toolkit.core.runner import core_runner
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
 from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 from tests.keras_tests.utils import get_layers_from_model_by_type
 
-DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
+DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
 from tensorflow.keras.models import Model
 
 keras = tf.keras
@@ -68,10 +68,10 @@ def __init__(self, unit_test,
                                                    input_shape=(32, 32, 1))
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 16,
+        tp = generate_test_tpc({'weights_n_bits': 16,
-                                     'activation_n_bits': 16,
-                                     'weights_quantization_method': QuantizationMethod.SYMMETRIC})
+                                'activation_n_bits': 16,
+                                'weights_quantization_method': QuantizationMethod.SYMMETRIC})
-        return generate_keras_tpc(name="second_moment_correction_test", tp_model=tp)
+        return generate_keras_tpc(name="second_moment_correction_test", tpc=tp)
 
     def get_quantization_config(self):
         return mct.core.QuantizationConfig(weights_second_moment_correction=True)
@@ -315,10 +315,10 @@ def __init__(self, unit_test):
                          linear_layer=layers.Conv2D)
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 16,
+        tp = generate_test_tpc({'weights_n_bits': 16,
-                                     'activation_n_bits': 16,
-                                     'weights_quantization_method': QuantizationMethod.POWER_OF_TWO})
+                                'activation_n_bits': 16,
+                                'weights_quantization_method': QuantizationMethod.POWER_OF_TWO})
-        return generate_keras_tpc(name="second_moment_correction_test", tp_model=tp)
+        return generate_keras_tpc(name="second_moment_correction_test", tpc=tp)
 
     def create_networks(self):
         inputs = layers.Input(shape=self.get_input_shapes()[0][1:])
@@ -454,10 +454,10 @@ def __init__(self, unit_test):
                          linear_layer=layers.Conv2D)
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 16,
+        tp = generate_test_tpc({'weights_n_bits': 16,
-                                     'activation_n_bits': 16,
-                                     'weights_quantization_method': QuantizationMethod.UNIFORM})
+                                'activation_n_bits': 16,
+                                'weights_quantization_method': QuantizationMethod.UNIFORM})
-        return generate_keras_tpc(name="second_moment_correction_test", tp_model=tp)
+        return generate_keras_tpc(name="second_moment_correction_test", tpc=tp)
 
     def create_networks(self):
         inputs = layers.Input(shape=self.get_input_shapes()[0][1:])
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/sigmoid_mul_substitution_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/sigmoid_mul_substitution_test.py
index 4cb9fff35..9586fa1ea 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/sigmoid_mul_substitution_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/sigmoid_mul_substitution_test.py
@@ -22,7 +22,7 @@
     from keras.activations import sigmoid
 
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.common_tests.helpers.tensors_compare import cosine_similarity
 from tests.keras_tests.utils import get_layers_from_model_by_type
@@ -34,9 +34,9 @@
 class SigMulSubstitutionTest(BaseKerasFeatureNetworkTest):
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'enable_weights_quantization': False,
+        tp = generate_test_tpc({'enable_weights_quantization': False,
-                                     'enable_activation_quantization': False})
+                                'enable_activation_quantization': False})
-        return generate_keras_tpc(name="test_no_quant", tp_model=tp)
+        return generate_keras_tpc(name="test_no_quant", tpc=tp)
 
     def create_networks(self):
         _in = tf.keras.layers.Input(self.input_shape[1:])
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/symmetric_threshold_selection_activation_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/symmetric_threshold_selection_activation_test.py
index 845dfddee..93e1940eb 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/symmetric_threshold_selection_activation_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/symmetric_threshold_selection_activation_test.py
@@ -19,7 +19,7 @@
 
 from mct_quantizers import KerasActivationQuantizationHolder
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 import model_compression_toolkit as mct
 from tests.keras_tests.utils import get_layers_from_model_by_type
@@ -38,10 +38,10 @@ def generate_inputs(self):
         return [np.random.uniform(low=-7, high=7, size=in_shape) for in_shape in self.get_input_shapes()]
 
     def get_tpc(self):
-        tp_model = generate_test_tp_model({
+        tpc = generate_test_tpc({
             'activation_quantization_method': tp.QuantizationMethod.SYMMETRIC,
             'activation_n_bits': 8})
-        return generate_keras_tpc(name="symmetric_threshold_test", tp_model=tp_model)
+        return generate_keras_tpc(name="symmetric_threshold_test", tpc=tpc)
 
     def get_quantization_config(self):
         return mct.core.QuantizationConfig(activation_error_method=self.activation_threshold_method)
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/test_kmeans_quantizer.py b/tests/keras_tests/feature_networks_tests/feature_networks/test_kmeans_quantizer.py
index a1499b5c1..c96d414dc 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/test_kmeans_quantizer.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/test_kmeans_quantizer.py
@@ -25,7 +25,7 @@
 
 from model_compression_toolkit.core.keras.constants import KERNEL
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 import numpy as np
 
@@ -66,10 +66,10 @@ def __init__(self,
         super().__init__(unit_test, num_calibration_iter=5, val_batch_size=32)
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_quantization_method': self.quantization_method,
+        tp = generate_test_tpc({'weights_quantization_method': self.quantization_method,
-                                     'weights_n_bits': self.weights_n_bits,
-                                     'activation_n_bits': 4})
+                                'weights_n_bits': self.weights_n_bits,
+                                'activation_n_bits': 4})
-        return generate_keras_tpc(name="kmean_quantizer_test", tp_model=tp)
+        return generate_keras_tpc(name="kmean_quantizer_test", tpc=tp)
 
     def get_quantization_config(self):
         return mct.core.QuantizationConfig(mct.core.QuantizationErrorMethod.MSE, mct.core.QuantizationErrorMethod.MSE,
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/tpc_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/tpc_test.py
index 55e54a2b5..4904890f9 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/tpc_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/tpc_test.py
@@ -32,8 +32,8 @@ def __init__(self, tpc_name: str, *args, **kwargs):
         self.tpc_name = tpc_name
 
     def get_tpc(self):
-        tp_model_name, tp_version = self.tpc_name.split('.')
-        return mct.get_target_platform_capabilities(TENSORFLOW, tp_model_name, tp_version)
+        tpc_name, tp_version = self.tpc_name.split('.')
+        return mct.get_target_platform_capabilities(TENSORFLOW, tpc_name, tp_version)
 
     def create_networks(self):
         inputs = layers.Input(shape=self.get_input_shapes()[0][1:])
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/uniform_range_selection_activation_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/uniform_range_selection_activation_test.py
index 3501a749a..9b54fc546 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/uniform_range_selection_activation_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/uniform_range_selection_activation_test.py
@@ -20,7 +20,7 @@
 
 from mct_quantizers import KerasActivationQuantizationHolder
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 import model_compression_toolkit as mct
 from tests.keras_tests.utils import get_layers_from_model_by_type
@@ -42,10 +42,10 @@ def get_quantization_config(self):
         return mct.core.QuantizationConfig(activation_error_method=self.activation_threshold_method)
 
     def get_tpc(self):
-        tp_model = generate_test_tp_model({
+        tpc = generate_test_tpc({
             'activation_quantization_method': tp.QuantizationMethod.UNIFORM,
             'activation_n_bits': 8})
-        return generate_keras_tpc(name="uniform_range_test", tp_model=tp_model)
+        return generate_keras_tpc(name="uniform_range_test", tpc=tpc)
 
     def create_networks(self):
         inputs = layers.Input(shape=self.get_input_shapes()[0][1:])
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py b/tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py
index 9839afe3f..05e174f76 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py
@@ -27,7 +27,7 @@
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import \
     get_op_quantization_configs, generate_keras_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_op_qc, generate_test_attr_configs
+from tests.common_tests.helpers.generate_test_tpc import generate_test_op_qc, generate_test_attr_configs
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 
 import model_compression_toolkit as mct
@@ -199,7 +199,7 @@ def get_tpc(self):
             base_config=two_bit_cfg,
         )
 
-        tp_model = schema.TargetPlatformCapabilities(
+        tpc = schema.TargetPlatformCapabilities(
             default_qco=weight_fixed_cfg,
             tpc_minor_version=None,
             tpc_patch_version=None,
@@ -209,7 +209,7 @@ def get_tpc(self):
             add_metadata=False,
             name="mp_part_weights_layers_test")
 
-        return tp_model
+        return tpc
 
     def create_networks(self):
         inputs = layers.Input(shape=self.get_input_shapes()[0][1:])
@@ -522,7 +522,7 @@ def get_tpc(self):
             base_config=cfg,
         )
 
-        tp_model = schema.TargetPlatformCapabilities(
+        tpc = schema.TargetPlatformCapabilities(
             default_qco=schema.QuantizationConfigOptions(quantization_configurations=tuple([cfg]), base_config=cfg),
             tpc_minor_version=None,
             tpc_patch_version=None,
@@ -532,7 +532,7 @@ def get_tpc(self):
             add_metadata=False,
             name="mp_weights_conf_act_test")
 
-        return tp_model
+        return tpc
 
     def get_resource_utilization(self):
         return ResourceUtilization(1535)
diff --git a/tests/keras_tests/feature_networks_tests/test_features_runner.py b/tests/keras_tests/feature_networks_tests/test_features_runner.py
index 00492c1a9..756b602f0 100644
--- a/tests/keras_tests/feature_networks_tests/test_features_runner.py
+++ b/tests/keras_tests/feature_networks_tests/test_features_runner.py
@@ -863,9 +863,9 @@ def test_metadata(self):
         MetadataTest(self).run_test()
 
     def test_keras_tpcs(self):
-        TpcTest(f'{C.IMX500_TP_MODEL}.v1', self).run_test()
-        TpcTest(f'{C.TFLITE_TP_MODEL}.v1', self).run_test()
-        TpcTest(f'{C.QNNPACK_TP_MODEL}.v1', self).run_test()
+        TpcTest(f'{C.IMX500_TPC}.v1', self).run_test()
+        TpcTest(f'{C.TFLITE_TPC}.v1', self).run_test()
+        TpcTest(f'{C.QNNPACK_TPC}.v1', self).run_test()
 
     def test_sigmoid_mul_substitution(self):
         SigMulSubstitutionTest(self).run_test()
diff --git a/tests/keras_tests/function_tests/test_activation_weights_composition_substitution.py b/tests/keras_tests/function_tests/test_activation_weights_composition_substitution.py
index 1cccf7553..bc6fd3c4a 100644
--- a/tests/keras_tests/function_tests/test_activation_weights_composition_substitution.py
+++ b/tests/keras_tests/function_tests/test_activation_weights_composition_substitution.py
@@ -23,7 +23,7 @@
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_op_qc, generate_test_attr_configs
+from tests.common_tests.helpers.generate_test_tpc import generate_test_op_qc, generate_test_attr_configs
 
 if version.parse(tf.__version__) >= version.parse("2.13"):
     from keras.src.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Dense, BatchNormalization, ReLU, Input, Add, InputLayer
diff --git a/tests/keras_tests/function_tests/test_cfg_candidates_filter.py b/tests/keras_tests/function_tests/test_cfg_candidates_filter.py
index eeeb898e0..a01269ddf 100644
--- a/tests/keras_tests/function_tests/test_cfg_candidates_filter.py
+++ b/tests/keras_tests/function_tests/test_cfg_candidates_filter.py
@@ -29,7 +29,7 @@
 from model_compression_toolkit.core.common.fusion.layer_fusing import fusion
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_attr_configs, generate_test_op_qc
+from tests.common_tests.helpers.generate_test_tpc import generate_test_attr_configs, generate_test_op_qc
 from tests.keras_tests.tpc_keras import get_tpc_with_activation_mp_keras
 
 tp = mct.target_platform
diff --git a/tests/keras_tests/function_tests/test_custom_layer.py b/tests/keras_tests/function_tests/test_custom_layer.py
index 267e7122f..49287ebd0 100644
--- a/tests/keras_tests/function_tests/test_custom_layer.py
+++ b/tests/keras_tests/function_tests/test_custom_layer.py
@@ -24,7 +24,7 @@
 from model_compression_toolkit.target_platform_capabilities.constants import BIAS_ATTR, KERNEL_ATTR
 from model_compression_toolkit.target_platform_capabilities.target_platform import LayerFilterParams
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_attr_configs, DEFAULT_WEIGHT_ATTR_CONFIG, \
+from tests.common_tests.helpers.generate_test_tpc import generate_test_attr_configs, DEFAULT_WEIGHT_ATTR_CONFIG, \
     KERNEL_BASE_CONFIG, BIAS_CONFIG
 
 keras = tf.keras
@@ -85,14 +85,14 @@ def get_tpc():
                                         qc_options=default_configuration_options.clone_and_edit(
                                             enable_activation_quantization=False)
                                         .clone_and_edit_weight_attribute(enable_weights_quantization=False))]
-    tp_model = schema.TargetPlatformCapabilities(default_qco=default_configuration_options,
+    tpc = schema.TargetPlatformCapabilities(default_qco=default_configuration_options,
-                                                 operator_set=tuple(operator_set),
-                                                 tpc_minor_version=None,
-                                                 tpc_patch_version=None,
-                                                 tpc_platform_type=None,
-                                                 add_metadata=False)
+                                            operator_set=tuple(operator_set),
+                                            tpc_minor_version=None,
+                                            tpc_patch_version=None,
+                                            tpc_platform_type=None,
+                                            add_metadata=False)
 
-    return tp_model
+    return tpc
 
 
 class TestCustomLayer(unittest.TestCase):
diff --git a/tests/keras_tests/function_tests/test_export_keras_fully_quantized_model.py b/tests/keras_tests/function_tests/test_export_keras_fully_quantized_model.py
index fc39f013e..18b00d0b8 100644
--- a/tests/keras_tests/function_tests/test_export_keras_fully_quantized_model.py
+++ b/tests/keras_tests/function_tests/test_export_keras_fully_quantized_model.py
@@ -27,12 +27,12 @@
 from model_compression_toolkit.trainable_infrastructure.keras.load_model import \
     keras_load_quantized_model
 
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
 from model_compression_toolkit.constants import TENSORFLOW
 from model_compression_toolkit import get_target_platform_capabilities
 from tests.keras_tests.utils import get_layers_from_model_by_type
 
-DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
+DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
 
 _, SAVED_EXPORTABLE_MODEL_PATH_TF = tempfile.mkstemp('.h5')
 _, SAVED_MODEL_PATH_TF = tempfile.mkstemp('.h5')
diff --git a/tests/keras_tests/function_tests/test_get_gptq_config.py b/tests/keras_tests/function_tests/test_get_gptq_config.py
index f7d2a0e0a..fb3515050 100644
--- a/tests/keras_tests/function_tests/test_get_gptq_config.py
+++ b/tests/keras_tests/function_tests/test_get_gptq_config.py
@@ -29,7 +29,7 @@
 from model_compression_toolkit.gptq.common.gptq_constants import QUANT_PARAM_LEARNING_STR, MAX_LSB_STR
 from model_compression_toolkit.gptq.keras.gptq_loss import multiple_tensors_mse_loss
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 
 layers = tf.keras.layers
 SHAPE = [1, 16, 16, 3]
@@ -131,11 +131,11 @@ def setUp(self):
                                                           optimizer=tf.keras.optimizers.Adam(),
                                                           regularization_factor=0.001)]
 
-        pot_tp = generate_test_tp_model({'weights_quantization_method': QuantizationMethod.POWER_OF_TWO})
-        self.pot_weights_tpc = generate_keras_tpc(name="gptq_pot_config_test", tp_model=pot_tp)
+        pot_tp = generate_test_tpc({'weights_quantization_method': QuantizationMethod.POWER_OF_TWO})
+        self.pot_weights_tpc = generate_keras_tpc(name="gptq_pot_config_test", tpc=pot_tp)
 
-        symmetric_tp = generate_test_tp_model({'weights_quantization_method': QuantizationMethod.SYMMETRIC})
-        self.symmetric_weights_tpc = generate_keras_tpc(name="gptq_symmetric_config_test", tp_model=symmetric_tp)
+        symmetric_tp = generate_test_tpc({'weights_quantization_method': QuantizationMethod.SYMMETRIC})
+        self.symmetric_weights_tpc = generate_keras_tpc(name="gptq_symmetric_config_test", tpc=symmetric_tp)
 
     def test_get_keras_gptq_config_pot(self):
         # This call removes the effect of @tf.function decoration and executes the decorated function eagerly, which
diff --git a/tests/keras_tests/function_tests/test_hmse_error_method.py b/tests/keras_tests/function_tests/test_hmse_error_method.py
index 2a4de3251..d64dbb5b7 100644
--- a/tests/keras_tests/function_tests/test_hmse_error_method.py
+++ b/tests/keras_tests/function_tests/test_hmse_error_method.py
@@ -39,7 +39,7 @@
 from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import \
     get_op_quantization_configs
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.common_tests.helpers.prep_graph_for_func_test import prepare_graph_with_configs
 
 tp = mct.target_platform
@@ -69,7 +69,7 @@ def representative_dataset():
 
 
 def get_tpc(quant_method, per_channel):
-    tp = generate_test_tp_model(edit_params_dict={
+    tp = generate_test_tpc(edit_params_dict={
         'weights_quantization_method': quant_method,
         'weights_per_channel_threshold': per_channel})
 
@@ -190,7 +190,7 @@ def _generate_bn_quantization_tpc(quant_method, per_channel):
                                              {GAMMA: AttributeQuantizationConfig(weights_n_bits=8,
                                                                                  enable_weights_quantization=True)})
 
-            tp_model = schema.TargetPlatformCapabilities(default_qco=conv_qco,
+            tpc = schema.TargetPlatformCapabilities(default_qco=conv_qco,
                                                          tpc_minor_version=None,
                                                          tpc_patch_version=None,
                                                          tpc_platform_type=None,
@@ -199,7 +199,7 @@ def _generate_bn_quantization_tpc(quant_method, per_channel):
                                                        schema.OperatorsSet(name="BN", qc_options=bn_qco)]),
                                                          add_metadata=False)
 
-            return tp_model
+            return tpc
 
         self._setup_with_args(quant_method=mct.target_platform.QuantizationMethod.SYMMETRIC, per_channel=True,
                               tpc_fn=_generate_bn_quantization_tpc, model_gen_fn=no_bn_fusion_model_gen)
diff --git a/tests/keras_tests/function_tests/test_kl_error_quantization_configurations.py b/tests/keras_tests/function_tests/test_kl_error_quantization_configurations.py
index 133d5da36..4a6a4a018 100644
--- a/tests/keras_tests/function_tests/test_kl_error_quantization_configurations.py
+++ b/tests/keras_tests/function_tests/test_kl_error_quantization_configurations.py
@@ -20,7 +20,7 @@
 import tensorflow as tf
 from tensorflow.keras import layers
 import itertools
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 
 
 def model_gen():
@@ -55,12 +55,12 @@ def representative_data_gen():
         model = model_gen()
         for quantize_method, error_method, per_channel in weights_test_combinations:
 
-            tp_model = generate_test_tp_model({
+            tpc = generate_test_tpc({
                 'weights_quantization_method': quantize_method,
                 'weights_n_bits': 8,
                 'activation_n_bits': 8,
                 'weights_per_channel_threshold': per_channel})
-            tpc = generate_keras_tpc(name="kl_quant_config_weights_test", tp_model=tp_model)
+            tpc = generate_keras_tpc(name="kl_quant_config_weights_test", tpc=tpc)
 
             qc = mct.core.QuantizationConfig(activation_error_method=mct.core.QuantizationErrorMethod.NOCLIPPING,
                                              weights_error_method=error_method, relu_bound_to_power_of_2=False,
@@ -74,12 +74,12 @@ def representative_data_gen():
 
         model = model_gen()
         for quantize_method, error_method, relu_bound_to_power_of_2 in activation_test_combinations:
-            tp = generate_test_tp_model({
+            tp = generate_test_tpc({
                 'activation_quantization_method': quantize_method,
                 'weights_n_bits': 8,
                 'activation_n_bits': 8,
                 'enable_weights_quantization': False})
-            tpc = generate_keras_tpc(name="kl_quant_config_activation_test", tp_model=tp)
+            tpc = generate_keras_tpc(name="kl_quant_config_activation_test", tpc=tp)
 
             qc = mct.core.QuantizationConfig(activation_error_method=error_method,
                                              relu_bound_to_power_of_2=relu_bound_to_power_of_2,
diff --git a/tests/keras_tests/function_tests/test_quant_config_filtering.py b/tests/keras_tests/function_tests/test_quant_config_filtering.py
index 281009a4c..20b4b33aa 100644
--- a/tests/keras_tests/function_tests/test_quant_config_filtering.py
+++ b/tests/keras_tests/function_tests/test_quant_config_filtering.py
@@ -24,8 +24,8 @@
     get_config_options_by_operators_set
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
-from tests.common_tests.helpers.generate_test_tp_model import generate_custom_test_tp_model
-from tests.common_tests.helpers.tpcs_for_tests.v3.tp_model import get_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_custom_test_tpc
+from tests.common_tests.helpers.tpcs_for_tests.v3.tpc import get_tpc
 
 if tf.__version__ >= "2.13":
     from keras.src.layers import TFOpLambda
@@ -40,17 +40,17 @@ class TestKerasQuantConfigFiltering(unittest.TestCase):
 
     @staticmethod
     def get_tpc_default_16bit():
-        tpc = get_tp_model()
+        tpc = get_tpc()
         base_cfg_16 = [c for c in get_config_options_by_operators_set(tpc,
                                                                       OperatorSetNames.MUL).quantization_configurations
                        if c.activation_n_bits == 16][0].clone_and_edit()
         qco_16 = QuantizationConfigOptions(base_config=base_cfg_16,
                                            quantization_configurations=(tpc.default_qco.base_config,
                                                                         base_cfg_16))
-        tpc = generate_custom_test_tp_model(
+        tpc = generate_custom_test_tpc(
             name="custom_16_bit_tpc",
             base_cfg=tpc.default_qco.base_config,
-            base_tp_model=tpc,
+            base_tpc=tpc,
             operator_sets_dict={
                 OperatorSetNames.MUL: qco_16,
                 OperatorSetNames.GELU: qco_16,
diff --git a/tests/keras_tests/function_tests/test_quantization_configurations.py b/tests/keras_tests/function_tests/test_quantization_configurations.py
index a4a505f2c..5b44c1c3b 100644
--- a/tests/keras_tests/function_tests/test_quantization_configurations.py
+++ b/tests/keras_tests/function_tests/test_quantization_configurations.py
@@ -23,7 +23,7 @@
 import model_compression_toolkit as mct
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 
 
 def model_gen():
@@ -65,12 +65,12 @@ def representative_data_gen():
 
         model = model_gen()
         for quantize_method, error_method, bias_correction, per_channel, input_scaling in weights_test_combinations:
-            tp = generate_test_tp_model({
+            tp = generate_test_tpc({
                 'weights_quantization_method': quantize_method,
                 'weights_n_bits': 8,
                 'activation_n_bits': 16,
                 'weights_per_channel_threshold': per_channel})
-            tpc = generate_keras_tpc(name="quant_config_weights_test", tp_model=tp)
+            tpc = generate_keras_tpc(name="quant_config_weights_test", tpc=tp)
 
             qc = mct.core.QuantizationConfig(activation_error_method=mct.core.QuantizationErrorMethod.NOCLIPPING,
                                              weights_error_method=error_method, relu_bound_to_power_of_2=False,
@@ -83,11 +83,11 @@ def representative_data_gen():
 
         model = model_gen()
         for quantize_method, error_method, relu_bound_to_power_of_2, shift_negative_correction in activation_test_combinations:
-            tp = generate_test_tp_model({
+            tp = generate_test_tpc({
                 'activation_quantization_method': quantize_method,
                 'weights_n_bits': 16,
                 'activation_n_bits': 8})
-            tpc = generate_keras_tpc(name="quant_config_activation_test", tp_model=tp)
+            tpc = generate_keras_tpc(name="quant_config_activation_test", tpc=tp)
 
             qc = mct.core.QuantizationConfig(activation_error_method=error_method,
                                              weights_error_method=mct.core.QuantizationErrorMethod.NOCLIPPING,
diff --git a/tests/keras_tests/function_tests/test_resource_utilization_data.py b/tests/keras_tests/function_tests/test_resource_utilization_data.py
index cd8b4d49e..a76e988af 100644
--- a/tests/keras_tests/function_tests/test_resource_utilization_data.py
+++ b/tests/keras_tests/function_tests/test_resource_utilization_data.py
@@ -27,7 +27,7 @@
 from model_compression_toolkit.core.keras.graph_substitutions.substitutions.separableconv_decomposition import \
     POINTWISE_KERNEL
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_op_qc, generate_test_attr_configs
+from tests.common_tests.helpers.generate_test_tpc import generate_test_op_qc, generate_test_attr_configs
 from tests.keras_tests.tpc_keras import get_tpc_with_activation_mp_keras
 
 
diff --git a/tests/keras_tests/function_tests/test_set_layer_to_bitwidth.py b/tests/keras_tests/function_tests/test_set_layer_to_bitwidth.py
index c612163cc..29c61dc24 100644
--- a/tests/keras_tests/function_tests/test_set_layer_to_bitwidth.py
+++ b/tests/keras_tests/function_tests/test_set_layer_to_bitwidth.py
@@ -42,7 +42,7 @@
 from model_compression_toolkit.core.keras.mixed_precision.configurable_weights_quantizer import \
     ConfigurableWeightsQuantizer
 from tests.common_tests.helpers.prep_graph_for_func_test import prepare_graph_with_quantization_parameters
-from tests.keras_tests.exporter_tests.tflite_int8.imx500_int8_tp_model import get_op_quantization_configs
+from tests.keras_tests.exporter_tests.tflite_int8.imx500_int8_tpc import get_op_quantization_configs
 from tests.keras_tests.tpc_keras import get_tpc_with_activation_mp_keras, get_weights_only_mp_tpc_keras
 
 
diff --git a/tests/keras_tests/function_tests/test_symmetric_threshold_selection_weights.py b/tests/keras_tests/function_tests/test_symmetric_threshold_selection_weights.py
index cb3108d23..745273eb1 100644
--- a/tests/keras_tests/function_tests/test_symmetric_threshold_selection_weights.py
+++ b/tests/keras_tests/function_tests/test_symmetric_threshold_selection_weights.py
@@ -27,7 +27,7 @@
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
 from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.common_tests.helpers.prep_graph_for_func_test import prepare_graph_with_quantization_parameters
 
 
@@ -57,10 +57,10 @@ def representative_dataset():
 
 
 def get_tpc(per_channel):
-    tp = generate_test_tp_model(edit_params_dict={
+    tp = generate_test_tpc(edit_params_dict={
         'weights_quantization_method': mct.target_platform.QuantizationMethod.SYMMETRIC,
         'weights_per_channel_threshold': per_channel})
-    tpc = generate_keras_tpc(name="symmetric_threshold_selection_test", tp_model=tp)
+    tpc = generate_keras_tpc(name="symmetric_threshold_selection_test", tpc=tp)
 
     return tpc
 
diff --git a/tests/keras_tests/function_tests/test_uniform_range_selection_weights.py b/tests/keras_tests/function_tests/test_uniform_range_selection_weights.py
index 91a584ccb..e7087b2b6 100644
--- a/tests/keras_tests/function_tests/test_uniform_range_selection_weights.py
+++ b/tests/keras_tests/function_tests/test_uniform_range_selection_weights.py
@@ -27,7 +27,7 @@
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
 from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.common_tests.helpers.prep_graph_for_func_test import prepare_graph_with_quantization_parameters
 
 
@@ -56,10 +56,10 @@ def representative_dataset():
 
 
 def get_tpc(per_channel):
-    tp = generate_test_tp_model({
+    tp = generate_test_tpc({
         'weights_quantization_method': mct.target_platform.QuantizationMethod.UNIFORM,
         'weights_per_channel_threshold': per_channel})
-    tpc = generate_keras_tpc(name="uniform_range_selection_test", tp_model=tp)
+    tpc = generate_keras_tpc(name="uniform_range_selection_test", tpc=tp)
 
     return tpc
 
diff --git a/tests/keras_tests/layer_tests/base_keras_layer_test.py b/tests/keras_tests/layer_tests/base_keras_layer_test.py
index eee4e9ede..8bc0a9283 100644
--- a/tests/keras_tests/layer_tests/base_keras_layer_test.py
+++ b/tests/keras_tests/layer_tests/base_keras_layer_test.py
@@ -7,7 +7,7 @@
 from model_compression_toolkit.trainable_infrastructure import KerasTrainableQuantizationWrapper
 from model_compression_toolkit.ptq import keras_post_training_quantization
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.keras_tests.tpc_keras import get_quantization_disabled_keras_tpc
 from packaging import version
 if version.parse(tf.__version__) >= version.parse("2.13"):
@@ -75,9 +75,9 @@ def get_tpc(self):
             # Disable all features that are enabled by default:
             return get_quantization_disabled_keras_tpc("float_layer_test")
         elif self.current_mode == LayerTestMode.QUANTIZED_8_BITS:
-            tp = generate_test_tp_model({'weights_n_bits': 8,
+            tp = generate_test_tpc({'weights_n_bits': 8,
                                          'activation_n_bits': 8})
-            return generate_keras_tpc(name="8bit_layer_test", tp_model=tp)
+            return generate_keras_tpc(name="8bit_layer_test", tpc=tp)
         else:
             raise NotImplemented
 
diff --git a/tests/keras_tests/non_parallel_tests/test_keras_tp_model.py b/tests/keras_tests/non_parallel_tests/test_keras_tpc.py
similarity index 97%
rename from tests/keras_tests/non_parallel_tests/test_keras_tp_model.py
rename to tests/keras_tests/non_parallel_tests/test_keras_tpc.py
index a141cf378..63cf0950b 100644
--- a/tests/keras_tests/non_parallel_tests/test_keras_tp_model.py
+++ b/tests/keras_tests/non_parallel_tests/test_keras_tpc.py
@@ -25,7 +25,7 @@
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.defaultdict import DefaultDict
 from model_compression_toolkit.core.common import BaseNode
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_op_qc, generate_test_attr_configs
+from tests.common_tests.helpers.generate_test_tpc import generate_test_op_qc, generate_test_attr_configs
 
 if version.parse(tf.__version__) >= version.parse("2.13"):
     from keras.src.layers import Conv2D, Conv2DTranspose, ReLU, Activation, BatchNormalization
@@ -42,8 +42,8 @@
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attribute_filter import \
     Greater, \
     Smaller, GreaterEq, Eq, SmallerEq, Contains
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL, IMX500_TP_MODEL, \
-    QNNPACK_TP_MODEL, TFLITE_TP_MODEL, KERNEL_ATTR, BIAS_ATTR, KERAS_KERNEL, BIAS, WEIGHTS_N_BITS
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC, IMX500_TPC, \
+    QNNPACK_TPC, TFLITE_TPC, KERNEL_ATTR, BIAS_ATTR, KERAS_KERNEL, BIAS, WEIGHTS_N_BITS
 from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
 
 tp = mct.target_platform
@@ -297,7 +297,7 @@ def test_get_default_op_qc(self):
 
 class TestGetKerasTPC(unittest.TestCase):
     def test_get_keras_tpc(self):
-        tpc = mct.get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
+        tpc = mct.get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
         input_shape = (1, 8, 8, 3)
         input_tensor = Input(shape=input_shape[1:])
         conv = Conv2D(3, 3)(input_tensor)
@@ -322,16 +322,16 @@ def rep_data():
                                                                       target_platform_capabilities=tpc)
 
     def test_get_keras_supported_version(self):
-        tpc = mct.get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)  # Latest
+        tpc = mct.get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)  # Latest
         self.assertTrue(tpc.tpc_minor_version == 1)
 
-        tpc = mct.get_target_platform_capabilities(TENSORFLOW, IMX500_TP_MODEL, "v1")
+        tpc = mct.get_target_platform_capabilities(TENSORFLOW, IMX500_TPC, "v1")
         self.assertTrue(tpc.tpc_minor_version == 1)
 
-        tpc = mct.get_target_platform_capabilities(TENSORFLOW, TFLITE_TP_MODEL, "v1")
+        tpc = mct.get_target_platform_capabilities(TENSORFLOW, TFLITE_TPC, "v1")
         self.assertTrue(tpc.tpc_minor_version == 1)
 
-        tpc = mct.get_target_platform_capabilities(TENSORFLOW, QNNPACK_TP_MODEL, "v1")
+        tpc = mct.get_target_platform_capabilities(TENSORFLOW, QNNPACK_TPC, "v1")
         self.assertTrue(tpc.tpc_minor_version == 1)
 
     def test_get_keras_not_supported_platform(self):
@@ -341,10 +341,10 @@ def test_get_keras_not_supported_platform(self):
 
     def test_get_keras_not_supported_fw(self):
         with self.assertRaises(Exception) as e:
-            mct.get_target_platform_capabilities("ONNX", DEFAULT_TP_MODEL)
+            mct.get_target_platform_capabilities("ONNX", DEFAULT_TPC)
         self.assertTrue(e.exception)
 
     def test_get_keras_not_supported_version(self):
         with self.assertRaises(Exception) as e:
-            mct.get_target_platform_capabilities(TENSORFLOW, IMX500_TP_MODEL, "v0")
+            mct.get_target_platform_capabilities(TENSORFLOW, IMX500_TPC, "v0")
         self.assertTrue(e.exception)
diff --git a/tests/keras_tests/non_parallel_tests/test_tensorboard_writer.py b/tests/keras_tests/non_parallel_tests/test_tensorboard_writer.py
index 4d2d16aaa..de1e8ec46 100644
--- a/tests/keras_tests/non_parallel_tests/test_tensorboard_writer.py
+++ b/tests/keras_tests/non_parallel_tests/test_tensorboard_writer.py
@@ -28,7 +28,7 @@
 from model_compression_toolkit.core import QuantizationConfig
 from model_compression_toolkit.core.common.visualization.final_config_visualizer import \
     ActivationFinalBitwidthConfigVisualizer
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
 from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
 from model_compression_toolkit.logger import Logger
@@ -37,7 +37,7 @@
     AttachTpcToKeras
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_op_quantization_configs
-from tests.common_tests.helpers.generate_test_tp_model import generate_tp_model_with_activation_mp
+from tests.common_tests.helpers.generate_test_tpc import generate_tpc_with_activation_mp
 from tests.common_tests.helpers.prep_graph_for_func_test import prepare_graph_set_bit_widths
 from model_compression_toolkit.core.common.mixed_precision.distance_weighting import MpDistanceWeighting
 from model_compression_toolkit.core.common.similarity_analyzer import compute_mse
@@ -139,13 +139,13 @@ def tensorboard_initial_graph_num_of_nodes(self, num_event_files, event_to_test)
     def plot_tensor_sizes(self, core_config):
         model = SingleOutputNet()
         base_config, _, default_config = get_op_quantization_configs()
-        tpc_model = generate_tp_model_with_activation_mp(
+        tpc_model = generate_tpc_with_activation_mp(
             base_cfg=base_config,
             default_config=default_config,
             mp_bitwidth_candidates_list=[(8, 8), (8, 4), (8, 2),
                                          (4, 8), (4, 4), (4, 2),
                                          (2, 8), (2, 4), (2, 2)])
-        tpc = generate_keras_tpc(name='mp_keras_tpc', tp_model=tpc_model)
+        tpc = generate_keras_tpc(name='mp_keras_tpc', tpc=tpc_model)
         fqc =AttachTpcToKeras().attach(tpc, core_config.quantization_config.custom_tpc_opset_to_layer)
 
         # Hessian service assumes core should be initialized. This test does not do it, so we disable the use of hessians in MP
diff --git a/tests/keras_tests/pruning_tests/feature_networks/networks_tests/conv2d_conv2dtranspose_pruning_test.py b/tests/keras_tests/pruning_tests/feature_networks/networks_tests/conv2d_conv2dtranspose_pruning_test.py
index fbd92e8b6..55ce833f4 100644
--- a/tests/keras_tests/pruning_tests/feature_networks/networks_tests/conv2d_conv2dtranspose_pruning_test.py
+++ b/tests/keras_tests/pruning_tests/feature_networks/networks_tests/conv2d_conv2dtranspose_pruning_test.py
@@ -17,7 +17,7 @@
 
 import model_compression_toolkit as mct
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.common_tests.pruning.constant_importance_metric import add_const_importance_metric, \
     ConstImportanceMetric
 
@@ -59,8 +59,8 @@ def create_networks(self):
 
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'simd_size': self.simd})
-        return generate_keras_tpc(name="simd_test", tp_model=tp)
+        tp = generate_test_tpc({'simd_size': self.simd})
+        return generate_keras_tpc(name="simd_test", tpc=tp)
 
     def get_pruning_config(self):
         if self.use_constant_importance_metric:
diff --git a/tests/keras_tests/pruning_tests/feature_networks/networks_tests/conv2d_pruning_test.py b/tests/keras_tests/pruning_tests/feature_networks/networks_tests/conv2d_pruning_test.py
index 29d05b571..04fb8ebfc 100644
--- a/tests/keras_tests/pruning_tests/feature_networks/networks_tests/conv2d_pruning_test.py
+++ b/tests/keras_tests/pruning_tests/feature_networks/networks_tests/conv2d_pruning_test.py
@@ -17,7 +17,7 @@
 
 import model_compression_toolkit as mct
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.common_tests.pruning.constant_importance_metric import ConstImportanceMetric, \
     add_const_importance_metric
 import numpy as np
@@ -48,8 +48,8 @@ def __init__(self,
         self.use_constant_importance_metric = use_constant_importance_metric
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'simd_size': self.simd})
-        return generate_keras_tpc(name="simd_test", tp_model=tp)
+        tp = generate_test_tpc({'simd_size': self.simd})
+        return generate_keras_tpc(name="simd_test", tpc=tp)
 
     def get_pruning_config(self):
         if self.use_constant_importance_metric:
diff --git a/tests/keras_tests/pruning_tests/feature_networks/networks_tests/conv2dtranspose_conv2d_pruning_test.py b/tests/keras_tests/pruning_tests/feature_networks/networks_tests/conv2dtranspose_conv2d_pruning_test.py
index ab7f84db6..677022d9e 100644
--- a/tests/keras_tests/pruning_tests/feature_networks/networks_tests/conv2dtranspose_conv2d_pruning_test.py
+++ b/tests/keras_tests/pruning_tests/feature_networks/networks_tests/conv2dtranspose_conv2d_pruning_test.py
@@ -18,7 +18,7 @@
 import numpy as np
 import model_compression_toolkit as mct
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.common_tests.pruning.constant_importance_metric import add_const_importance_metric, \
     ConstImportanceMetric
 
@@ -45,8 +45,8 @@ def __init__(self,
         self.use_constant_importance_metric = use_constant_importance_metric
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'simd_size': self.simd})
-        return generate_keras_tpc(name="simd_test", tp_model=tp)
+        tp = generate_test_tpc({'simd_size': self.simd})
+        return generate_keras_tpc(name="simd_test", tpc=tp)
 
     def get_pruning_config(self):
         if self.use_constant_importance_metric:
diff --git a/tests/keras_tests/pruning_tests/feature_networks/networks_tests/conv2dtranspose_pruning_test.py b/tests/keras_tests/pruning_tests/feature_networks/networks_tests/conv2dtranspose_pruning_test.py
index 4ec74bf08..9c4c7fa2b 100644
--- a/tests/keras_tests/pruning_tests/feature_networks/networks_tests/conv2dtranspose_pruning_test.py
+++ b/tests/keras_tests/pruning_tests/feature_networks/networks_tests/conv2dtranspose_pruning_test.py
@@ -18,7 +18,7 @@
 
 import model_compression_toolkit as mct
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.common_tests.pruning.constant_importance_metric import add_const_importance_metric, \
     ConstImportanceMetric
 
@@ -61,8 +61,8 @@ def create_networks(self):
         return model
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'simd_size': self.simd})
-        return generate_keras_tpc(name="simd_test", tp_model=tp)
+        tp = generate_test_tpc({'simd_size': self.simd})
+        return generate_keras_tpc(name="simd_test", tpc=tp)
 
     def get_pruning_config(self):
         if self.use_constant_importance_metric:
diff --git a/tests/keras_tests/pruning_tests/feature_networks/networks_tests/dense_pruning_test.py b/tests/keras_tests/pruning_tests/feature_networks/networks_tests/dense_pruning_test.py
index 59246e893..aa37bcf9d 100644
--- a/tests/keras_tests/pruning_tests/feature_networks/networks_tests/dense_pruning_test.py
+++ b/tests/keras_tests/pruning_tests/feature_networks/networks_tests/dense_pruning_test.py
@@ -18,7 +18,7 @@
 
 import model_compression_toolkit as mct
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.common_tests.pruning.constant_importance_metric import add_const_importance_metric, \
     ConstImportanceMetric
 
@@ -50,8 +50,8 @@ def __init__(self,
         self.use_constant_importance_metric = use_constant_importance_metric
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'simd_size': self.simd})
-        return generate_keras_tpc(name="simd_test", tp_model=tp)
+        tp = generate_test_tpc({'simd_size': self.simd})
+        return generate_keras_tpc(name="simd_test", tpc=tp)
 
     def get_pruning_config(self):
         if self.use_constant_importance_metric:
diff --git a/tests/keras_tests/tpc_keras.py b/tests/keras_tests/tpc_keras.py
index 374eec308..d73e48c0d 100644
--- a/tests/keras_tests/tpc_keras.py
+++ b/tests/keras_tests/tpc_keras.py
@@ -28,8 +28,8 @@
 
 import model_compression_toolkit as mct
 
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model, \
-    generate_mixed_precision_test_tp_model, generate_tp_model_with_activation_mp
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc, \
+    generate_mixed_precision_test_tpc, generate_tpc_with_activation_mp
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 
 tp = mct.target_platform
@@ -39,54 +39,54 @@ def get_tpc(name, weight_bits=8, activation_bits=8,
             weights_quantization_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO,
             activation_quantization_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO,
             per_channel=True):
-    tp_model = generate_test_tp_model({'weights_n_bits': weight_bits,
+    tpc = generate_test_tpc({'weights_n_bits': weight_bits,
                                        'activation_n_bits': activation_bits,
                                        'weights_quantization_method': weights_quantization_method,
                                        'activation_quantization_method': activation_quantization_method,
                                        'weights_per_channel_threshold': per_channel})
-    return generate_keras_tpc(name=name, tp_model=tp_model)
+    return generate_keras_tpc(name=name, tpc=tpc)
 
 
 def get_16bit_tpc(name):
-    tp_model = generate_test_tp_model({'weights_n_bits': 16,
+    tpc = generate_test_tpc({'weights_n_bits': 16,
                                        'activation_n_bits': 16})
-    return generate_keras_tpc(name=name, tp_model=tp_model)
+    return generate_keras_tpc(name=name, tpc=tpc)
 
 
 def get_16bit_tpc_per_tensor(name):
-    tp_model = generate_test_tp_model({'weights_n_bits': 16,
+    tpc = generate_test_tpc({'weights_n_bits': 16,
                                        'activation_n_bits': 16,
                                        "weights_per_channel_threshold": False})
-    return generate_keras_tpc(name=name, tp_model=tp_model)
+    return generate_keras_tpc(name=name, tpc=tpc)
 
 
 def get_quantization_disabled_keras_tpc(name):
-    tp = generate_test_tp_model({'enable_weights_quantization': False,
+    tp = generate_test_tpc({'enable_weights_quantization': False,
                                  'enable_activation_quantization': False})
-    return generate_keras_tpc(name=name, tp_model=tp)
+    return generate_keras_tpc(name=name, tpc=tp)
 
 
 def get_activation_quantization_disabled_keras_tpc(name):
-    tp = generate_test_tp_model({'enable_activation_quantization': False})
-    return generate_keras_tpc(name=name, tp_model=tp)
+    tp = generate_test_tpc({'enable_activation_quantization': False})
+    return generate_keras_tpc(name=name, tpc=tp)
 
 
 def get_weights_quantization_disabled_keras_tpc(name):
-    tp = generate_test_tp_model({'enable_weights_quantization': False})
-    return generate_keras_tpc(name=name, tp_model=tp)
+    tp = generate_test_tpc({'enable_weights_quantization': False})
+    return generate_keras_tpc(name=name, tpc=tp)
 
 
 def get_weights_only_mp_tpc_keras(base_config, default_config, mp_bitwidth_candidates_list, name):
-    mp_tp_model = generate_mixed_precision_test_tp_model(base_cfg=base_config,
-                                                         default_config=default_config,
-                                                         mp_bitwidth_candidates_list=mp_bitwidth_candidates_list)
-    return mp_tp_model
+    mp_tpc = generate_mixed_precision_test_tpc(base_cfg=base_config,
+                                                    default_config=default_config,
+                                                    mp_bitwidth_candidates_list=mp_bitwidth_candidates_list)
+    return mp_tpc
 
 
 def get_tpc_with_activation_mp_keras(base_config, default_config, mp_bitwidth_candidates_list, name, custom_opsets={}):
-    mp_tp_model = generate_tp_model_with_activation_mp(base_cfg=base_config,
-                                                       default_config=default_config,
-                                                       mp_bitwidth_candidates_list=mp_bitwidth_candidates_list,
-                                                       custom_opsets=list(custom_opsets.keys()))
+    mp_tpc = generate_tpc_with_activation_mp(base_cfg=base_config,
+                                                  default_config=default_config,
+                                                  mp_bitwidth_candidates_list=mp_bitwidth_candidates_list,
+                                                  custom_opsets=list(custom_opsets.keys()))
 
-    return mp_tp_model
+    return mp_tpc
diff --git a/tests/keras_tests/xquant_tests/test_xquant_end2end.py b/tests/keras_tests/xquant_tests/test_xquant_end2end.py
index b62fe7c86..f03b60f33 100644
--- a/tests/keras_tests/xquant_tests/test_xquant_end2end.py
+++ b/tests/keras_tests/xquant_tests/test_xquant_end2end.py
@@ -28,7 +28,7 @@
 import model_compression_toolkit as mct
 from mct_quantizers import KerasQuantizationWrapper
 from model_compression_toolkit.constants import TENSORFLOW
-from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TPC
 from model_compression_toolkit.xquant.common.similarity_functions import DEFAULT_SIMILARITY_METRICS_NAMES
 from model_compression_toolkit.xquant.common.xquant_config import XQuantConfig
 
@@ -42,7 +42,7 @@
     INTERMEDIATE_SIMILARITY_METRICS_VAL, XQUANT_REPR, XQUANT_VAL, CUT_MEMORY_ELEMENTS, CUT_TOTAL_SIZE
 
 from model_compression_toolkit.xquant.keras.facade_xquant_report import xquant_report_keras_experimental
-from tests.common_tests.helpers.tpcs_for_tests.v2.tp_model import get_tp_model
+from tests.common_tests.helpers.tpcs_for_tests.v2.tpc import get_tpc
 
 
 def random_data_gen(shape=(8, 8, 3), use_labels=False, num_inputs=1, batch_size=2, num_iter=2):
@@ -75,7 +75,7 @@ def get_core_config(self):
         return mct.core.CoreConfig(debug_config=mct.core.DebugConfig(simulate_scheduler=True))
 
     def get_tpc(self):
-        return get_tp_model()
+        return get_tpc()
 
     def get_model_to_test(self):
         inputs = keras.layers.Input(shape=self.get_input_shape())
diff --git a/tests/pytorch_tests/exporter_tests/base_pytorch_export_test.py b/tests/pytorch_tests/exporter_tests/base_pytorch_export_test.py
index 332a4f7d8..b98101512 100644
--- a/tests/pytorch_tests/exporter_tests/base_pytorch_export_test.py
+++ b/tests/pytorch_tests/exporter_tests/base_pytorch_export_test.py
@@ -23,10 +23,10 @@
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor
 from model_compression_toolkit.exporter.model_exporter.pytorch.pytorch_export_facade import DEFAULT_ONNX_OPSET_VERSION
 
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import \
     generate_pytorch_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 
 
 class BasePytorchExportTest(unittest.TestCase):
@@ -41,7 +41,7 @@ def get_dataset(self):
         yield [to_torch_tensor(np.random.rand(*shape)).to(get_working_device()) for shape in self.get_input_shapes()]
 
     def get_tpc(self):
-        return mct.get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
+        return mct.get_target_platform_capabilities(PYTORCH, DEFAULT_TPC)
 
     def get_serialization_format(self):
         raise NotImplemented
diff --git a/tests/pytorch_tests/exporter_tests/custom_ops_tests/test_export_lut_symmetric_onnx_quantizers.py b/tests/pytorch_tests/exporter_tests/custom_ops_tests/test_export_lut_symmetric_onnx_quantizers.py
index 122146a2b..4212248d8 100644
--- a/tests/pytorch_tests/exporter_tests/custom_ops_tests/test_export_lut_symmetric_onnx_quantizers.py
+++ b/tests/pytorch_tests/exporter_tests/custom_ops_tests/test_export_lut_symmetric_onnx_quantizers.py
@@ -18,7 +18,7 @@
 from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import \
     generate_pytorch_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.pytorch_tests.exporter_tests.base_pytorch_onnx_export_test import BasePytorchONNXCustomOpsExportTest
 from tests.pytorch_tests.exporter_tests.custom_ops_tests.test_export_pot_onnx_quantizers import OneLayer
 
@@ -29,11 +29,11 @@ def get_model(self):
         return OneLayer(torch.nn.Conv2d, in_channels=3, out_channels=4, kernel_size=5)
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'activation_n_bits': 2,
+        tp = generate_test_tpc({'activation_n_bits': 2,
                                      'weights_n_bits': 2,
                                      'weights_quantization_method': QuantizationMethod.LUT_SYM_QUANTIZER,
                                      'activation_quantization_method': QuantizationMethod.POWER_OF_TWO})
-        return generate_pytorch_tpc(name="test_conv2d_2bit_fq_weight", tp_model=tp)
+        return generate_pytorch_tpc(name="test_conv2d_2bit_fq_weight", tpc=tp)
 
 
     def compare(self, exported_model, wrapped_quantized_model, quantization_info, onnx_op_to_search="WeightsLUTSymmetricQuantizer"):
@@ -61,11 +61,11 @@ def get_model(self):
         return OneLayer(torch.nn.Conv2d, in_channels=3, out_channels=4, kernel_size=5)
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'activation_n_bits': 2,
+        tp = generate_test_tpc({'activation_n_bits': 2,
                                      'weights_n_bits': 2,
                                      'weights_quantization_method': QuantizationMethod.LUT_POT_QUANTIZER,
                                      'activation_quantization_method': QuantizationMethod.POWER_OF_TWO})
-        return generate_pytorch_tpc(name="test_conv2d_2bit_fq_weight", tp_model=tp)
+        return generate_pytorch_tpc(name="test_conv2d_2bit_fq_weight", tpc=tp)
 
 
     def compare(self, exported_model, wrapped_quantized_model, quantization_info):
diff --git a/tests/pytorch_tests/exporter_tests/custom_ops_tests/test_export_pot_onnx_quantizers.py b/tests/pytorch_tests/exporter_tests/custom_ops_tests/test_export_pot_onnx_quantizers.py
index 97f1cde62..b76a0b0bc 100644
--- a/tests/pytorch_tests/exporter_tests/custom_ops_tests/test_export_pot_onnx_quantizers.py
+++ b/tests/pytorch_tests/exporter_tests/custom_ops_tests/test_export_pot_onnx_quantizers.py
@@ -27,7 +27,7 @@
 
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import \
     generate_pytorch_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.pytorch_tests.exporter_tests.base_pytorch_onnx_export_test import BasePytorchONNXCustomOpsExportTest
 from tests.pytorch_tests.model_tests.feature_models.qat_test import dummy_train
 import onnx
@@ -51,9 +51,9 @@ def get_model(self):
         return OneLayer(torch.nn.Conv2d, in_channels=3, out_channels=4, kernel_size=5)
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'activation_n_bits': 2,
+        tp = generate_test_tpc({'activation_n_bits': 2,
                                      'weights_n_bits': 2})
-        return generate_pytorch_tpc(name="test_conv2d_2bit_fq_weight", tp_model=tp)
+        return generate_pytorch_tpc(name="test_conv2d_2bit_fq_weight", tpc=tp)
 
     def compare(self, exported_model, wrapped_quantized_model, quantization_info):
         pot_q_nodes = self._get_onnx_node_by_type(exported_model, "ActivationPOTQuantizer")
diff --git a/tests/pytorch_tests/exporter_tests/custom_ops_tests/test_export_symmetric_onnx_quantizers.py b/tests/pytorch_tests/exporter_tests/custom_ops_tests/test_export_symmetric_onnx_quantizers.py
index 923d86d5f..71f59a5da 100644
--- a/tests/pytorch_tests/exporter_tests/custom_ops_tests/test_export_symmetric_onnx_quantizers.py
+++ b/tests/pytorch_tests/exporter_tests/custom_ops_tests/test_export_symmetric_onnx_quantizers.py
@@ -28,7 +28,7 @@
 
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import \
     generate_pytorch_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.pytorch_tests.exporter_tests.base_pytorch_onnx_export_test import BasePytorchONNXCustomOpsExportTest
 from tests.pytorch_tests.exporter_tests.custom_ops_tests.test_export_pot_onnx_quantizers import OneLayer
 from tests.pytorch_tests.model_tests.feature_models.qat_test import dummy_train
@@ -43,11 +43,11 @@ def get_model(self):
         return OneLayer(torch.nn.Conv2d, in_channels=3, out_channels=4, kernel_size=5)
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'activation_n_bits': 2,
+        tp = generate_test_tpc({'activation_n_bits': 2,
                                      'weights_n_bits': 2,
                                      'weights_quantization_method': QuantizationMethod.SYMMETRIC,
                                      'activation_quantization_method': QuantizationMethod.SYMMETRIC})
-        return generate_pytorch_tpc(name="test_conv2d_2bit_fq_weight", tp_model=tp)
+        return generate_pytorch_tpc(name="test_conv2d_2bit_fq_weight", tpc=tp)
 
 
     def compare(self, exported_model, wrapped_quantized_model, quantization_info):
diff --git a/tests/pytorch_tests/exporter_tests/custom_ops_tests/test_export_uniform_onnx_quantizers.py b/tests/pytorch_tests/exporter_tests/custom_ops_tests/test_export_uniform_onnx_quantizers.py
index e4b2c1fed..8cf4b354e 100644
--- a/tests/pytorch_tests/exporter_tests/custom_ops_tests/test_export_uniform_onnx_quantizers.py
+++ b/tests/pytorch_tests/exporter_tests/custom_ops_tests/test_export_uniform_onnx_quantizers.py
@@ -19,7 +19,7 @@
 from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import \
     generate_pytorch_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.pytorch_tests.exporter_tests.base_pytorch_onnx_export_test import BasePytorchONNXCustomOpsExportTest
 from tests.pytorch_tests.exporter_tests.custom_ops_tests.test_export_pot_onnx_quantizers import OneLayer
 
@@ -32,11 +32,11 @@ def get_model(self):
         return OneLayer(torch.nn.Conv2d, in_channels=3, out_channels=4, kernel_size=5)
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'activation_n_bits': 2,
+        tp = generate_test_tpc({'activation_n_bits': 2,
                                      'weights_n_bits': 2,
                                      'weights_quantization_method': QuantizationMethod.UNIFORM,
                                      'activation_quantization_method': QuantizationMethod.UNIFORM})
-        return generate_pytorch_tpc(name="test_conv2d_2bit_fq_weight", tp_model=tp)
+        return generate_pytorch_tpc(name="test_conv2d_2bit_fq_weight", tpc=tp)
 
     def compare(self, exported_model, wrapped_quantized_model, quantization_info):
         pot_q_nodes = self._get_onnx_node_by_type(exported_model, "ActivationUniformQuantizer")
diff --git a/tests/pytorch_tests/exporter_tests/test_exporting_qat_models.py b/tests/pytorch_tests/exporter_tests/test_exporting_qat_models.py
index 59b2671f8..6b1b8c53c 100644
--- a/tests/pytorch_tests/exporter_tests/test_exporting_qat_models.py
+++ b/tests/pytorch_tests/exporter_tests/test_exporting_qat_models.py
@@ -25,7 +25,7 @@
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import \
     generate_pytorch_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.pytorch_tests.model_tests.feature_models.qat_test import dummy_train
 
 
@@ -38,8 +38,8 @@ def get_dataset(self):
         yield [to_torch_tensor(np.random.rand(1, 3, 224, 224)).to(get_working_device())]
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 2})
-        return generate_pytorch_tpc(name="test_conv2d_2bit_fq_weight", tp_model=tp)
+        tp = generate_test_tpc({'weights_n_bits': 2})
+        return generate_pytorch_tpc(name="test_conv2d_2bit_fq_weight", tpc=tp)
 
     def get_serialization_format(self):
         return mct.exporter.PytorchExportSerializationFormat.TORCHSCRIPT
diff --git a/tests/pytorch_tests/function_tests/get_gptq_config_test.py b/tests/pytorch_tests/function_tests/get_gptq_config_test.py
index 0b8aa03d1..8e4f59610 100644
--- a/tests/pytorch_tests/function_tests/get_gptq_config_test.py
+++ b/tests/pytorch_tests/function_tests/get_gptq_config_test.py
@@ -22,7 +22,7 @@
 from model_compression_toolkit import DefaultDict
 from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.gptq.common.gptq_constants import QUANT_PARAM_LEARNING_STR, MAX_LSB_STR
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 
@@ -88,8 +88,8 @@ def run_test(self):
         else:
             gptq_config.gptq_quantizer_params_override = None
 
-        tp = generate_test_tp_model({'weights_quantization_method': self.quantization_method})
-        symmetric_weights_tpc = generate_pytorch_tpc(name="gptq_config_test", tp_model=tp)
+        tp = generate_test_tpc({'weights_quantization_method': self.quantization_method})
+        symmetric_weights_tpc = generate_pytorch_tpc(name="gptq_config_test", tpc=tp)
 
         float_model = TestModel()
 
diff --git a/tests/pytorch_tests/function_tests/resource_utilization_data_test.py b/tests/pytorch_tests/function_tests/resource_utilization_data_test.py
index 6eefe763d..1bef65772 100644
--- a/tests/pytorch_tests/function_tests/resource_utilization_data_test.py
+++ b/tests/pytorch_tests/function_tests/resource_utilization_data_test.py
@@ -25,7 +25,7 @@
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import \
     get_op_quantization_configs
 from model_compression_toolkit.core.pytorch.constants import KERNEL
-from tests.common_tests.helpers.generate_test_tp_model import generate_tp_model_with_activation_mp, generate_test_op_qc, \
+from tests.common_tests.helpers.generate_test_tpc import generate_tpc_with_activation_mp, generate_test_op_qc, \
     generate_test_attr_configs
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 from tests.pytorch_tests.tpc_pytorch import get_mp_activation_pytorch_tpc_dict
@@ -107,7 +107,7 @@ def prep_test(model, mp_bitwidth_candidates_list, random_datagen):
     default_config = base_config.clone_and_edit(attr_weights_configs_mapping={})
 
     tpc_dict = get_mp_activation_pytorch_tpc_dict(
-        tpc_model=generate_tp_model_with_activation_mp(
+        tpc_model=generate_tpc_with_activation_mp(
             base_cfg=base_config,
             default_config=default_config,
             mp_bitwidth_candidates_list=[(8, 8), (8, 4), (8, 2),
diff --git a/tests/pytorch_tests/function_tests/set_layer_to_bitwidth_test.py b/tests/pytorch_tests/function_tests/set_layer_to_bitwidth_test.py
index 1cef3c0ef..7b6ca5d26 100644
--- a/tests/pytorch_tests/function_tests/set_layer_to_bitwidth_test.py
+++ b/tests/pytorch_tests/function_tests/set_layer_to_bitwidth_test.py
@@ -28,7 +28,7 @@
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2pytorch import \
     AttachTpcToPytorch
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_op_quantization_configs
-from tests.common_tests.helpers.generate_test_tp_model import generate_mixed_precision_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_mixed_precision_test_tpc
 from tests.common_tests.helpers.prep_graph_for_func_test import prepare_graph_with_quantization_parameters
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 from tests.pytorch_tests.tpc_pytorch import get_pytorch_test_tpc_dict
@@ -76,7 +76,7 @@ def representative_data_gen(self, n_iters=1):
     def run_test(self, seed=0, **kwargs):
         base_config, _, default_config = get_op_quantization_configs()
         tpc = get_pytorch_test_tpc_dict(
-            tp_model=generate_mixed_precision_test_tp_model(
+            tpc=generate_mixed_precision_test_tpc(
                 base_cfg=base_config,
                 default_config=default_config,
                 mp_bitwidth_candidates_list=[(8, 8), (4, 8), (2, 8)]),
@@ -126,7 +126,7 @@ def representative_data_gen(self, n_iters=1):
     def run_test(self, seed=0, **kwargs):
         base_config, _, default_config = get_op_quantization_configs()
         tpc = get_pytorch_test_tpc_dict(
-            tp_model=generate_mixed_precision_test_tp_model(
+            tpc=generate_mixed_precision_test_tpc(
                 base_cfg=base_config,
                 default_config=default_config,
                 mp_bitwidth_candidates_list=[(8, 8), (8, 4), (8, 2)]),
diff --git a/tests/pytorch_tests/function_tests/test_export_pytorch_fully_quantized_model.py b/tests/pytorch_tests/function_tests/test_export_pytorch_fully_quantized_model.py
index e584f5392..4bc46d1fe 100644
--- a/tests/pytorch_tests/function_tests/test_export_pytorch_fully_quantized_model.py
+++ b/tests/pytorch_tests/function_tests/test_export_pytorch_fully_quantized_model.py
@@ -25,14 +25,14 @@
 
 import model_compression_toolkit as mct
 from model_compression_toolkit.constants import PYTORCH
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor
 from model_compression_toolkit.exporter import pytorch_export_model
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit import get_target_platform_capabilities
 
-DEFAULT_PYTORCH_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
+DEFAULT_PYTORCH_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TPC)
 
 _, SAVED_MODEL_PATH_PTH = tempfile.mkstemp('.pth')
 _, SAVED_MODEL_PATH_ONNX = tempfile.mkstemp('.onnx')
@@ -76,11 +76,11 @@ def setUp(self) -> None:
 
     def get_tpc(self):
         return generate_pytorch_tpc(name="2_quant_pytorch_test",
-                                    tp_model=generate_test_tp_model({'weights_n_bits': 2,
+                                    tpc=generate_test_tpc({'weights_n_bits': 2,
                                                                      'activation_n_bits': 8,
                                                                      'enable_weights_quantization': True,
                                                                      'enable_activation_quantization': True
-                                                                     }))
+                                                           }))
 
     def run_mct(self, model):
         core_config = mct.core.CoreConfig()
diff --git a/tests/pytorch_tests/function_tests/test_pytorch_tp_model.py b/tests/pytorch_tests/function_tests/test_pytorch_tpc.py
similarity index 97%
rename from tests/pytorch_tests/function_tests/test_pytorch_tp_model.py
rename to tests/pytorch_tests/function_tests/test_pytorch_tpc.py
index 1df69af38..8875d47c8 100644
--- a/tests/pytorch_tests/function_tests/test_pytorch_tp_model.py
+++ b/tests/pytorch_tests/function_tests/test_pytorch_tpc.py
@@ -33,10 +33,10 @@
     LayerFilterParams
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attribute_filter import \
     Greater, Smaller, Eq
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL, IMX500_TP_MODEL, \
-    TFLITE_TP_MODEL, QNNPACK_TP_MODEL, KERNEL_ATTR, WEIGHTS_N_BITS, PYTORCH_KERNEL, BIAS_ATTR, BIAS
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC, IMX500_TPC, \
+    TFLITE_TPC, QNNPACK_TPC, KERNEL_ATTR, WEIGHTS_N_BITS, PYTORCH_KERNEL, BIAS_ATTR, BIAS
 from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_op_qc, generate_test_attr_configs
+from tests.common_tests.helpers.generate_test_tpc import generate_test_op_qc, generate_test_attr_configs
 from tests.pytorch_tests.layer_tests.base_pytorch_layer_test import LayerTestModel
 
 tp = mct.target_platform
@@ -276,7 +276,7 @@ def test_pytorch_fusing_patterns(self):
 class TestGetPytorchTPC(unittest.TestCase):
 
     def test_get_pytorch_models(self):
-        tpc = mct.get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
+        tpc = mct.get_target_platform_capabilities(PYTORCH, DEFAULT_TPC)
         model = mobilenet_v2(pretrained=True)
 
         def rep_data():
@@ -298,16 +298,16 @@ def rep_data():
                                                                         core_config=core_config)
 
     def test_get_pytorch_supported_version(self):
-        tpc = mct.get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)  # Latest
+        tpc = mct.get_target_platform_capabilities(PYTORCH, DEFAULT_TPC)  # Latest
         self.assertTrue(tpc.tpc_minor_version == 1)
 
-        tpc = mct.get_target_platform_capabilities(PYTORCH, IMX500_TP_MODEL, "v1")
+        tpc = mct.get_target_platform_capabilities(PYTORCH, IMX500_TPC, "v1")
         self.assertTrue(tpc.tpc_minor_version == 1)
 
-        tpc = mct.get_target_platform_capabilities(PYTORCH, TFLITE_TP_MODEL, "v1")
+        tpc = mct.get_target_platform_capabilities(PYTORCH, TFLITE_TPC, "v1")
         self.assertTrue(tpc.tpc_minor_version == 1)
 
-        tpc = mct.get_target_platform_capabilities(PYTORCH, QNNPACK_TP_MODEL, "v1")
+        tpc = mct.get_target_platform_capabilities(PYTORCH, QNNPACK_TPC, "v1")
         self.assertTrue(tpc.tpc_minor_version == 1)
 
     def test_get_pytorch_not_supported_platform(self):
@@ -317,12 +317,12 @@ def test_get_pytorch_not_supported_platform(self):
 
     def test_get_pytorch_not_supported_fw(self):
         with self.assertRaises(Exception) as e:
-            mct.get_target_platform_capabilities("ONNX", DEFAULT_TP_MODEL)
+            mct.get_target_platform_capabilities("ONNX", DEFAULT_TPC)
         self.assertTrue(e.exception)
 
     def test_get_pytorch_not_supported_version(self):
         with self.assertRaises(Exception) as e:
-            mct.get_target_platform_capabilities(PYTORCH, IMX500_TP_MODEL, "v0")
+            mct.get_target_platform_capabilities(PYTORCH, IMX500_TPC, "v0")
         self.assertTrue(e.exception)
 
 
diff --git a/tests/pytorch_tests/function_tests/test_quant_config_filtering.py b/tests/pytorch_tests/function_tests/test_quant_config_filtering.py
index 870b4818e..12850512d 100644
--- a/tests/pytorch_tests/function_tests/test_quant_config_filtering.py
+++ b/tests/pytorch_tests/function_tests/test_quant_config_filtering.py
@@ -24,8 +24,8 @@
     get_config_options_by_operators_set
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2pytorch import \
     AttachTpcToPytorch
-from tests.common_tests.helpers.generate_test_tp_model import generate_custom_test_tp_model
-from tests.common_tests.helpers.tpcs_for_tests.v3.tp_model import get_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_custom_test_tpc
+from tests.common_tests.helpers.tpcs_for_tests.v3.tpc import get_tpc
 
 get_op_set = lambda x, x_list: [op_set for op_set in x_list if op_set.name == x][0]
 
@@ -34,17 +34,17 @@ class TestTorchQuantConfigFiltering(unittest.TestCase):
 
     @staticmethod
     def get_tpc_default_16bit():
-        tpc = get_tp_model()
+        tpc = get_tpc()
         base_cfg_16 = [c for c in get_config_options_by_operators_set(tpc,
                                                                       OperatorSetNames.MUL).quantization_configurations
                        if c.activation_n_bits == 16][0].clone_and_edit()
         qco_16 = QuantizationConfigOptions(base_config=base_cfg_16,
                                            quantization_configurations=(tpc.default_qco.base_config,
                                                                         base_cfg_16))
-        tpc = generate_custom_test_tp_model(
+        tpc = generate_custom_test_tpc(
             name="custom_16_bit_tpc",
             base_cfg=tpc.default_qco.base_config,
-            base_tp_model=tpc,
+            base_tpc=tpc,
             operator_sets_dict={
                 OperatorSetNames.MUL: qco_16,
                 OperatorSetNames.GELU: qco_16,
diff --git a/tests/pytorch_tests/function_tests/test_quantization_configurations.py b/tests/pytorch_tests/function_tests/test_quantization_configurations.py
index 14ece9384..4f59c267c 100644
--- a/tests/pytorch_tests/function_tests/test_quantization_configurations.py
+++ b/tests/pytorch_tests/function_tests/test_quantization_configurations.py
@@ -21,7 +21,7 @@
 
 import model_compression_toolkit as mct
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 import torch
 
 class ModelToTest(torch.nn.Module):
@@ -72,12 +72,12 @@ def representative_data_gen():
 
         model = model_gen()
         for quantize_method, error_method, bias_correction, per_channel in weights_test_combinations:
-            tp = generate_test_tp_model({
+            tp = generate_test_tpc({
                 'weights_quantization_method': quantize_method,
                 'weights_n_bits': 8,
                 'activation_n_bits': 16,
                 'weights_per_channel_threshold': per_channel})
-            tpc = generate_pytorch_tpc(name="quant_config_weights_test", tp_model=tp)
+            tpc = generate_pytorch_tpc(name="quant_config_weights_test", tpc=tp)
 
             qc = mct.core.QuantizationConfig(activation_error_method=mct.core.QuantizationErrorMethod.NOCLIPPING,
                                              weights_error_method=error_method,
@@ -91,11 +91,11 @@ def representative_data_gen():
 
         model = model_gen()
         for quantize_method, error_method, relu_bound_to_power_of_2, shift_negative_correction in activation_test_combinations:
-            tp = generate_test_tp_model({
+            tp = generate_test_tpc({
                 'activation_quantization_method': quantize_method,
                 'weights_n_bits': 16,
                 'activation_n_bits': 8})
-            tpc = generate_pytorch_tpc(name="quant_config_activation_test", tp_model=tp)
+            tpc = generate_pytorch_tpc(name="quant_config_activation_test", tpc=tp)
 
             qc = mct.core.QuantizationConfig(activation_error_method=error_method,
                                              weights_error_method=mct.core.QuantizationErrorMethod.NOCLIPPING,
diff --git a/tests/pytorch_tests/layer_tests/base_pytorch_layer_test.py b/tests/pytorch_tests/layer_tests/base_pytorch_layer_test.py
index 78ff7c63f..ddb8b8ca4 100644
--- a/tests/pytorch_tests/layer_tests/base_pytorch_layer_test.py
+++ b/tests/pytorch_tests/layer_tests/base_pytorch_layer_test.py
@@ -41,7 +41,7 @@
 from tests.common_tests.base_layer_test import BaseLayerTest, LayerTestMode
 from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
 from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 
 
 PYTORCH_LAYER_TEST_OPS = {
@@ -180,13 +180,13 @@ def __init__(self,
     def get_tpc(self):
         if self.current_mode == LayerTestMode.FLOAT:
             # Disable all features that are enabled by default:
-            tp = generate_test_tp_model({'enable_weights_quantization': False,
+            tp = generate_test_tpc({'enable_weights_quantization': False,
                                          'enable_activation_quantization': False})
-            return generate_pytorch_tpc(name="base_layer_test", tp_model=tp)
+            return generate_pytorch_tpc(name="base_layer_test", tpc=tp)
         elif self.current_mode == LayerTestMode.QUANTIZED_8_BITS:
-            tp = generate_test_tp_model({'weights_n_bits': 8,
+            tp = generate_test_tpc({'weights_n_bits': 8,
                                          'activation_n_bits': 8})
-            return generate_pytorch_tpc(name="8bit_layer_test", tp_model=tp)
+            return generate_pytorch_tpc(name="8bit_layer_test", tpc=tp)
         else:
             raise NotImplemented
 
diff --git a/tests/pytorch_tests/model_tests/base_pytorch_feature_test.py b/tests/pytorch_tests/model_tests/base_pytorch_feature_test.py
index 0e2f0cfe7..266bacd27 100644
--- a/tests/pytorch_tests/model_tests/base_pytorch_feature_test.py
+++ b/tests/pytorch_tests/model_tests/base_pytorch_feature_test.py
@@ -14,7 +14,7 @@
 # ==============================================================================
 from model_compression_toolkit.constants import PYTORCH
 from model_compression_toolkit.core.common.framework_implementation import FrameworkImplementation
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
 from model_compression_toolkit.core import FrameworkInfo
 from model_compression_toolkit.ptq import pytorch_post_training_quantization
 from model_compression_toolkit import get_target_platform_capabilities
@@ -38,7 +38,7 @@ def __init__(self,
                          input_shape=input_shape)
 
     def get_tpc(self):
-        return get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
+        return get_target_platform_capabilities(PYTORCH, DEFAULT_TPC)
 
     def get_ptq_facade(self):
         return pytorch_post_training_quantization
diff --git a/tests/pytorch_tests/model_tests/base_pytorch_test.py b/tests/pytorch_tests/model_tests/base_pytorch_test.py
index da861212e..e00f22da3 100644
--- a/tests/pytorch_tests/model_tests/base_pytorch_test.py
+++ b/tests/pytorch_tests/model_tests/base_pytorch_test.py
@@ -23,7 +23,7 @@
 
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
 from tests.common_tests.base_feature_test import BaseFeatureNetworkTest
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 
 """
 The base test class for the feature networks
@@ -44,21 +44,21 @@ def __init__(self,
 
     def get_tpc(self):
         return {
-            'no_quantization': generate_test_tp_model({'weights_n_bits': 32,
+            'no_quantization': generate_test_tpc({'weights_n_bits': 32,
                                                        'activation_n_bits': 32,
                                                        'enable_weights_quantization': False,
                                                        'enable_activation_quantization': False
-                                                       }),
-            'all_32bit': generate_test_tp_model({'weights_n_bits': 32,
+                                                  }),
+            'all_32bit': generate_test_tpc({'weights_n_bits': 32,
                                                  'activation_n_bits': 32,
                                                  'enable_weights_quantization': True,
                                                  'enable_activation_quantization': True
-                                                 }),
-            'all_4bit': generate_test_tp_model({'weights_n_bits': 4,
+                                            }),
+            'all_4bit': generate_test_tpc({'weights_n_bits': 4,
                                                 'activation_n_bits': 4,
                                                 'enable_weights_quantization': True,
                                                 'enable_activation_quantization': True
-                                                }),
+                                           }),
         }
 
     def get_core_configs(self):
diff --git a/tests/pytorch_tests/model_tests/feature_models/activation_16bit_test.py b/tests/pytorch_tests/model_tests/feature_models/activation_16bit_test.py
index 3ab11b38a..ec035b618 100644
--- a/tests/pytorch_tests/model_tests/feature_models/activation_16bit_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/activation_16bit_test.py
@@ -18,13 +18,13 @@
 import model_compression_toolkit as mct
 from model_compression_toolkit.constants import PYTORCH
 from model_compression_toolkit.core import MixedPrecisionQuantizationConfig
-from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TPC
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import OperatorSetNames, \
     QuantizationConfigOptions
 from model_compression_toolkit.target_platform_capabilities.schema.schema_functions import \
     get_config_options_by_operators_set
-from tests.common_tests.helpers.generate_test_tp_model import generate_custom_test_tp_model
-from tests.common_tests.helpers.tpcs_for_tests.v4.tp_model import get_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_custom_test_tpc
+from tests.common_tests.helpers.tpcs_for_tests.v4.tpc import get_tpc
 from model_compression_toolkit.core.pytorch.utils import get_working_device
 from tests.pytorch_tests.model_tests.base_pytorch_feature_test import BasePytorchFeatureNetworkTest
 
@@ -85,16 +85,16 @@ def forward(self, x):
 class Activation16BitTest(BasePytorchFeatureNetworkTest):
 
     def get_tpc(self):
-        tpc = get_tp_model()
+        tpc = get_tpc()
         base_cfg_16 = [c for c in get_config_options_by_operators_set(tpc, OperatorSetNames.MUL).quantization_configurations
                        if c.activation_n_bits == 16][0].clone_and_edit()
         qco_16 = QuantizationConfigOptions(base_config=base_cfg_16,
                                            quantization_configurations=(tpc.default_qco.base_config,
                                                                         base_cfg_16))
-        tpc = generate_custom_test_tp_model(
+        tpc = generate_custom_test_tpc(
             name="custom_16_bit_tpc",
             base_cfg=tpc.default_qco.base_config,
-            base_tp_model=tpc,
+            base_tpc=tpc,
             operator_sets_dict={
                 OperatorSetNames.MUL: qco_16,
                 OperatorSetNames.GELU: qco_16,
@@ -129,7 +129,7 @@ def compare(self, quantized_model, float_model, input_x=None, quantization_info=
 class Activation16BitMixedPrecisionTest(Activation16BitTest):
 
     def get_tpc(self):
-        tpc = get_tp_model()
+        tpc = get_tpc()
 
         mul_qco = get_config_options_by_operators_set(tpc, OperatorSetNames.MUL)
         base_cfg_16 = [l for l in mul_qco.quantization_configurations if l.activation_n_bits == 16][0]
@@ -141,10 +141,10 @@ def get_tpc(self):
         qco_16 = QuantizationConfigOptions(base_config=base_cfg_16,
                                            quantization_configurations=quantization_configurations)
 
-        tpc = generate_custom_test_tp_model(
+        tpc = generate_custom_test_tpc(
             name="custom_16_bit_tpc",
             base_cfg=tpc.default_qco.base_config,
-            base_tp_model=tpc,
+            base_tpc=tpc,
             operator_sets_dict={
                 OperatorSetNames.MUL: qco_16,
             })
diff --git a/tests/pytorch_tests/model_tests/feature_models/bn_attributes_quantization_test.py b/tests/pytorch_tests/model_tests/feature_models/bn_attributes_quantization_test.py
index 3a04b53a0..2d78dd778 100644
--- a/tests/pytorch_tests/model_tests/feature_models/bn_attributes_quantization_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/bn_attributes_quantization_test.py
@@ -24,7 +24,7 @@
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, PYTORCH_KERNEL, BIAS, \
     BIAS_ATTR
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_attr_configs, \
+from tests.common_tests.helpers.generate_test_tpc import generate_test_attr_configs, \
     DEFAULT_WEIGHT_ATTR_CONFIG, KERNEL_BASE_CONFIG, generate_test_op_qc, BIAS_CONFIG
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import Signedness
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
diff --git a/tests/pytorch_tests/model_tests/feature_models/compute_max_cut_test.py b/tests/pytorch_tests/model_tests/feature_models/compute_max_cut_test.py
index a648ff95b..1e43ad43e 100644
--- a/tests/pytorch_tests/model_tests/feature_models/compute_max_cut_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/compute_max_cut_test.py
@@ -15,9 +15,9 @@
 
 import torch.nn as nn
 import model_compression_toolkit as mct
-from tests.common_tests.helpers.tpcs_for_tests.v2.tp_model import get_tp_model
+from tests.common_tests.helpers.tpcs_for_tests.v2.tpc import get_tpc
 from tests.pytorch_tests.model_tests.base_pytorch_feature_test import BasePytorchFeatureNetworkTest
-from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TPC
 from model_compression_toolkit.constants import PYTORCH
 from mct_quantizers.pytorch.metadata import get_metadata
 
@@ -43,7 +43,7 @@ def forward(self, x):
 class ComputeMaxCutTest(BasePytorchFeatureNetworkTest):
 
     def get_tpc(self):
-        return get_tp_model()
+        return get_tpc()
 
     def create_networks(self):
         return MaxCutModel()
diff --git a/tests/pytorch_tests/model_tests/feature_models/const_quantization_test.py b/tests/pytorch_tests/model_tests/feature_models/const_quantization_test.py
index 7d7e44684..56b8cdbab 100644
--- a/tests/pytorch_tests/model_tests/feature_models/const_quantization_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/const_quantization_test.py
@@ -22,12 +22,12 @@
 from model_compression_toolkit.core import MixedPrecisionQuantizationConfig, CoreConfig, QuantizationConfig
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor, torch_tensor_to_numpy, set_model
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
-from tests.common_tests.helpers.tpcs_for_tests.v4.tp_model import get_tp_model as get_tp_v4
-from tests.common_tests.helpers.tpcs_for_tests.v3.tp_model import get_tp_model as get_tp_v3
+from tests.common_tests.helpers.tpcs_for_tests.v4.tpc import get_tpc as get_tp_v4
+from tests.common_tests.helpers.tpcs_for_tests.v3.tpc import get_tpc as get_tp_v3
 from tests.pytorch_tests.model_tests.base_pytorch_feature_test import BasePytorchFeatureNetworkTest
 from tests.common_tests.helpers.tensors_compare import cosine_similarity
 from tests.pytorch_tests.utils import get_layers_from_model_by_type
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_attr_configs, DEFAULT_WEIGHT_ATTR_CONFIG
+from tests.common_tests.helpers.generate_test_tpc import generate_test_attr_configs, DEFAULT_WEIGHT_ATTR_CONFIG
 from mct_quantizers import PytorchQuantizationWrapper
 
 tp = mct.target_platform
@@ -254,7 +254,7 @@ def get_tpc(self):
                                                    weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO))
         const_configuration_options = schema.QuantizationConfigOptions(quantization_configurations=tuple([const_config]))
 
-        tp_model = schema.TargetPlatformCapabilities(
+        tpc = schema.TargetPlatformCapabilities(
             default_qco=default_configuration_options,
             tpc_minor_version=None,
             tpc_patch_version=None,
@@ -262,7 +262,7 @@ def get_tpc(self):
             operator_set=tuple([schema.OperatorsSet(name="WeightQuant", qc_options=const_configuration_options)]),
             add_metadata=False)
 
-        return tp_model
+        return tpc
 
     def create_networks(self):
         return ExpandConstQuantizationNet(self.val_batch_size)
diff --git a/tests/pytorch_tests/model_tests/feature_models/const_representation_test.py b/tests/pytorch_tests/model_tests/feature_models/const_representation_test.py
index b7a03e19f..c128d963b 100644
--- a/tests/pytorch_tests/model_tests/feature_models/const_representation_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/const_representation_test.py
@@ -18,7 +18,7 @@
 import model_compression_toolkit as mct
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor, torch_tensor_to_numpy, set_model
 from tests.pytorch_tests.model_tests.base_pytorch_feature_test import BasePytorchFeatureNetworkTest
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 from tests.common_tests.helpers.tensors_compare import cosine_similarity
 
@@ -54,10 +54,10 @@ def __init__(self, unit_test, func, const, input_reverse_order=False):
         self.input_reverse_order = input_reverse_order
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 32,
+        tp = generate_test_tpc({'weights_n_bits': 32,
                                      'activation_n_bits': 32,
                                      'enable_activation_quantization': False})
-        return generate_pytorch_tpc(name="linear_collapsing_test", tp_model=tp)
+        return generate_pytorch_tpc(name="linear_collapsing_test", tpc=tp)
 
     def get_quantization_config(self):
         return mct.core.QuantizationConfig(mct.core.QuantizationErrorMethod.NOCLIPPING,
@@ -126,11 +126,11 @@ def __init__(self, unit_test, func, const, enable_weights_quantization):
         self.enable_weights_quantization = enable_weights_quantization
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 32,
+        tp = generate_test_tpc({'weights_n_bits': 32,
                                      'activation_n_bits': 32,
                                      'enable_weights_quantization': self.enable_weights_quantization,
                                      'enable_activation_quantization': False})
-        return generate_pytorch_tpc(name="linear_collapsing_test", tp_model=tp)
+        return generate_pytorch_tpc(name="linear_collapsing_test", tpc=tp)
 
     def create_networks(self):
         return ConstRepresentationLinearLayerNet(self.func, self.const)
@@ -206,10 +206,10 @@ def create_networks(self):
         return ConstRepresentationCodeNet(self.input_shape[2:])
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 32,
+        tp = generate_test_tpc({'weights_n_bits': 32,
                                      'activation_n_bits': 32,
                                      'enable_activation_quantization': False})
-        return generate_pytorch_tpc(name="linear_collapsing_test", tp_model=tp)
+        return generate_pytorch_tpc(name="linear_collapsing_test", tpc=tp)
 
     def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
         in_torch_tensor = to_torch_tensor(input_x[0])
diff --git a/tests/pytorch_tests/model_tests/feature_models/constant_conv_substitution_test.py b/tests/pytorch_tests/model_tests/feature_models/constant_conv_substitution_test.py
index 5c1457be4..8558ca28e 100644
--- a/tests/pytorch_tests/model_tests/feature_models/constant_conv_substitution_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/constant_conv_substitution_test.py
@@ -18,7 +18,7 @@
 import model_compression_toolkit as mct
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor, set_model
 from tests.pytorch_tests.model_tests.base_pytorch_feature_test import BasePytorchFeatureNetworkTest
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 import numpy as np
 
@@ -31,11 +31,11 @@ def __init__(self, unit_test):
         super().__init__(unit_test=unit_test)
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 32,
+        tp = generate_test_tpc({'weights_n_bits': 32,
                                      'activation_n_bits': 32,
                                      'enable_weights_quantization': False,
                                      'enable_activation_quantization': False})
-        return generate_pytorch_tpc(name="permute_substitution_test", tp_model=tp)
+        return generate_pytorch_tpc(name="permute_substitution_test", tpc=tp)
 
     def get_quantization_config(self):
         return mct.core.QuantizationConfig(mct.core.QuantizationErrorMethod.NOCLIPPING,
diff --git a/tests/pytorch_tests/model_tests/feature_models/gptq_test.py b/tests/pytorch_tests/model_tests/feature_models/gptq_test.py
index 3b8b2230c..9349e12f1 100644
--- a/tests/pytorch_tests/model_tests/feature_models/gptq_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/gptq_test.py
@@ -27,7 +27,7 @@
 from model_compression_toolkit.gptq.pytorch.gptq_loss import multiple_tensors_mse_loss
 from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.pytorch_tests.model_tests.base_pytorch_feature_test import BasePytorchFeatureNetworkTest
 from tests.pytorch_tests.utils import extract_model_weights
 
@@ -92,7 +92,7 @@ def create_networks(self):
     def get_tpc(self):
         return generate_pytorch_tpc(
             name="gptq_test",
-            tp_model=generate_test_tp_model({'weights_n_bits': self.weights_bits,
+            tpc=generate_test_tpc({'weights_n_bits': self.weights_bits,
                                              'weights_quantization_method': self.weights_quant_method}))
 
     def gptq_compare(self, ptq_model, gptq_model, input_x=None):
diff --git a/tests/pytorch_tests/model_tests/feature_models/linear_collapsing_test.py b/tests/pytorch_tests/model_tests/feature_models/linear_collapsing_test.py
index 40936db8e..24c28eb1e 100644
--- a/tests/pytorch_tests/model_tests/feature_models/linear_collapsing_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/linear_collapsing_test.py
@@ -18,7 +18,7 @@
 import model_compression_toolkit as mct
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor, torch_tensor_to_numpy, set_model
 from tests.pytorch_tests.model_tests.base_pytorch_feature_test import BasePytorchFeatureNetworkTest
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 from tests.common_tests.helpers.tensors_compare import cosine_similarity
 
@@ -31,11 +31,11 @@ def __init__(self, unit_test):
         super().__init__(unit_test=unit_test, input_shape=(16, 32, 32))
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 32,
+        tp = generate_test_tpc({'weights_n_bits': 32,
                                      'activation_n_bits': 32,
                                      'enable_weights_quantization': False,
                                      'enable_activation_quantization': False})
-        return generate_pytorch_tpc(name="linear_collapsing_test", tp_model=tp)
+        return generate_pytorch_tpc(name="linear_collapsing_test", tpc=tp)
 
     def get_quantization_config(self):
         return mct.core.QuantizationConfig(mct.core.QuantizationErrorMethod.NOCLIPPING,
diff --git a/tests/pytorch_tests/model_tests/feature_models/lut_quantizer_test.py b/tests/pytorch_tests/model_tests/feature_models/lut_quantizer_test.py
index 86e64b3e0..340920081 100644
--- a/tests/pytorch_tests/model_tests/feature_models/lut_quantizer_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/lut_quantizer_test.py
@@ -20,7 +20,7 @@
     ChangeCandidatesWeightsQuantizationMethod
 from model_compression_toolkit.core.pytorch.constants import KERNEL
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor, torch_tensor_to_numpy, set_model
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.pytorch_tests.tpc_pytorch import get_pytorch_test_tpc_dict
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 
@@ -90,7 +90,7 @@ def __init__(self, unit_test, weights_n_bits=4, quant_method=tp.QuantizationMeth
 
     def get_tpc(self):
         return get_pytorch_test_tpc_dict(
-            tp_model=generate_test_tp_model({"weights_n_bits": self.weights_n_bits}),
+            tpc=generate_test_tpc({"weights_n_bits": self.weights_n_bits}),
             test_name='lut_quantizer_test',
             ftp_name='lut_quantizer_pytorch_test')
 
@@ -133,7 +133,7 @@ def __init__(self, unit_test, activation_n_bits=4):
 
     def get_tpc(self):
         return get_pytorch_test_tpc_dict(
-            tp_model=generate_test_tp_model({"activation_n_bits": self.activation_n_bits,
+            tpc=generate_test_tpc({"activation_n_bits": self.activation_n_bits,
                                              "activation_quantization_method": tp.QuantizationMethod.LUT_POT_QUANTIZER}),
             test_name='lut_quantizer_test',
             ftp_name='lut_quantizer_pytorch_test')
diff --git a/tests/pytorch_tests/model_tests/feature_models/manual_bit_selection.py b/tests/pytorch_tests/model_tests/feature_models/manual_bit_selection.py
index 5d012769c..31e831e87 100644
--- a/tests/pytorch_tests/model_tests/feature_models/manual_bit_selection.py
+++ b/tests/pytorch_tests/model_tests/feature_models/manual_bit_selection.py
@@ -25,8 +25,8 @@
 from model_compression_toolkit.target_platform_capabilities.schema.schema_functions import \
     get_config_options_by_operators_set
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
-from tests.common_tests.helpers.generate_test_tp_model import generate_custom_test_tp_model
-from tests.common_tests.helpers.tpcs_for_tests.v3.tp_model import get_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_custom_test_tpc
+from tests.common_tests.helpers.tpcs_for_tests.v3.tpc import get_tpc
 from tests.pytorch_tests.model_tests.feature_models.mixed_precision_activation_test import \
     MixedPrecisionActivationBaseTest
 from tests.pytorch_tests.utils import get_layer_type_from_activation_quantizer
@@ -200,7 +200,7 @@ def compare(self, quantized_models, float_model, input_x=None, quantization_info
 class Manual16BitTest(ManualBitWidthByLayerNameTest):
 
     def get_tpc(self):
-        tpc = get_tp_model()
+        tpc = get_tpc()
 
         mul_qco = get_config_options_by_operators_set(tpc, OperatorSetNames.MUL)
         base_cfg_16 = [l for l in mul_qco.quantization_configurations if l.activation_n_bits == 16][0]
@@ -209,10 +209,10 @@ def get_tpc(self):
         qco_16 = QuantizationConfigOptions(base_config=base_cfg_16,
                                            quantization_configurations=quantization_configurations)
 
-        tpc = generate_custom_test_tp_model(
+        tpc = generate_custom_test_tpc(
             name="custom_16_bit_tpc",
             base_cfg=tpc.default_qco.base_config,
-            base_tp_model=tpc,
+            base_tpc=tpc,
             operator_sets_dict={
                 OperatorSetNames.MUL: qco_16,
             })
@@ -226,7 +226,7 @@ def create_feature_network(self, input_shape):
 class Manual16BitTestMixedPrecisionTest(ManualBitWidthByLayerNameTest):
 
     def get_tpc(self):
-        tpc = get_tp_model()
+        tpc = get_tpc()
 
         mul_qco = get_config_options_by_operators_set(tpc, OperatorSetNames.MUL)
         base_cfg_16 = [l for l in mul_qco.quantization_configurations if l.activation_n_bits == 16][0]
@@ -238,10 +238,10 @@ def get_tpc(self):
         qco_16 = QuantizationConfigOptions(base_config=base_cfg_16,
                                            quantization_configurations=quantization_configurations)
 
-        tpc = generate_custom_test_tp_model(
+        tpc = generate_custom_test_tpc(
             name="custom_16_bit_tpc",
             base_cfg=tpc.default_qco.base_config,
-            base_tp_model=tpc,
+            base_tpc=tpc,
             operator_sets_dict={
                 OperatorSetNames.MUL: qco_16,
             })
diff --git a/tests/pytorch_tests/model_tests/feature_models/metadata_test.py b/tests/pytorch_tests/model_tests/feature_models/metadata_test.py
index 4ee1937ff..5f9528087 100644
--- a/tests/pytorch_tests/model_tests/feature_models/metadata_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/metadata_test.py
@@ -18,10 +18,10 @@
 import numpy as np
 import model_compression_toolkit as mct
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor, torch_tensor_to_numpy, set_model
-from tests.common_tests.helpers.tpcs_for_tests.v2.tp_model import get_tp_model
+from tests.common_tests.helpers.tpcs_for_tests.v2.tpc import get_tpc
 from tests.pytorch_tests.model_tests.base_pytorch_feature_test import BasePytorchFeatureNetworkTest
 from tests.common_tests.helpers.tensors_compare import cosine_similarity
-from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TPC
 from model_compression_toolkit.constants import PYTORCH
 from mct_quantizers import PytorchQuantizationWrapper
 from mct_quantizers.pytorch.metadata import add_metadata, get_metadata, add_onnx_metadata, get_onnx_metadata
@@ -43,7 +43,7 @@ def forward(self, x):
 class MetadataTest(BasePytorchFeatureNetworkTest):
 
     def get_tpc(self):
-        return get_tp_model()
+        return get_tpc()
 
     def create_networks(self):
         return DummyNet()
diff --git a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_activation_test.py b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_activation_test.py
index 0b9094557..7723928c5 100644
--- a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_activation_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_activation_test.py
@@ -29,7 +29,7 @@
     QuantizationConfigOptions
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_op_quantization_configs
-from tests.common_tests.helpers.generate_test_tp_model import generate_tp_model_with_activation_mp
+from tests.common_tests.helpers.generate_test_tpc import generate_tpc_with_activation_mp
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 import model_compression_toolkit as mct
 from tests.pytorch_tests.tpc_pytorch import get_mp_activation_pytorch_tpc_dict
@@ -46,7 +46,7 @@ def __init__(self, unit_test):
     def get_tpc(self):
         base_config, _, default_config = get_op_quantization_configs()
         return get_mp_activation_pytorch_tpc_dict(
-            tpc_model=generate_tp_model_with_activation_mp(
+            tpc_model=generate_tpc_with_activation_mp(
                 base_cfg=base_config,
                 default_config=default_config,
                 mp_bitwidth_candidates_list=[(8, 8), (8, 4), (8, 2),
@@ -148,7 +148,7 @@ def get_core_configs(self):
     def get_tpc(self):
         base_config, _, default_config = get_op_quantization_configs()
         return get_mp_activation_pytorch_tpc_dict(
-            tpc_model=generate_tp_model_with_activation_mp(
+            tpc_model=generate_tpc_with_activation_mp(
                 base_cfg=base_config,
                 default_config=default_config,
                 mp_bitwidth_candidates_list=[(8, 8), (8, 4), (8, 2),
@@ -269,13 +269,13 @@ def get_tpc(self):
                    (4, 8), (4, 4), (4, 2),
                    (2, 8), (2, 4), (2, 2)]
 
-        tp_model = generate_tp_model_with_activation_mp(
+        tpc = generate_tpc_with_activation_mp(
             base_cfg=base_config,
             default_config=default_config,
             mp_bitwidth_candidates_list=mp_list,
             custom_opsets=['Softmax'])
 
-        return get_mp_activation_pytorch_tpc_dict(tpc_model=tp_model,
+        return get_mp_activation_pytorch_tpc_dict(tpc_model=tpc,
                                                   test_name='mixed_precision_activation_model',
                                                   tpc_name='mixed_precision_distance_fn_test')
 
@@ -324,7 +324,7 @@ def get_tpc(self):
             base_config=cfg,
         )
 
-        tp_model = TargetPlatformCapabilities(
+        tpc = TargetPlatformCapabilities(
             default_qco=QuantizationConfigOptions(quantization_configurations=tuple([cfg]), base_config=cfg),
             tpc_minor_version=None,
             tpc_patch_version=None,
@@ -335,7 +335,7 @@ def get_tpc(self):
             add_metadata=False,
             name="mp_activation_conf_weights_test")
 
-        return {'mixed_precision_activation_model': tp_model}
+        return {'mixed_precision_activation_model': tpc}
 
     def create_feature_network(self, input_shape):
         return MixedPrecisionActivationTestNet(input_shape)
diff --git a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_bops_test.py b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_bops_test.py
index 7016a84fd..7f80e3ac1 100644
--- a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_bops_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_bops_test.py
@@ -19,7 +19,7 @@
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_op_quantization_configs
-from tests.common_tests.helpers.generate_test_tp_model import generate_tp_model_with_activation_mp
+from tests.common_tests.helpers.generate_test_tpc import generate_tpc_with_activation_mp
 from tests.pytorch_tests.tpc_pytorch import get_mp_activation_pytorch_tpc_dict
 
 import model_compression_toolkit as mct
@@ -108,7 +108,7 @@ def __init__(self, unit_test, mixed_precision_candidates_list):
     def get_tpc(self):
         base_config, _, default_config = get_op_quantization_configs()
         return get_mp_activation_pytorch_tpc_dict(
-            tpc_model=generate_tp_model_with_activation_mp(
+            tpc_model=generate_tpc_with_activation_mp(
                 base_cfg=base_config,
                 default_config=default_config,
                 mp_bitwidth_candidates_list=self.mixed_precision_candidates_list),
diff --git a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_weights_test.py b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_weights_test.py
index 38c343bd0..03ddf2a73 100644
--- a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_weights_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_weights_test.py
@@ -28,9 +28,9 @@
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, OperatorsSet, \
     QuantizationConfigOptions
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
-from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_tp_model, \
+from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_tpc, \
     get_op_quantization_configs
-from tests.common_tests.helpers.generate_test_tp_model import generate_mixed_precision_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_mixed_precision_test_tpc
 from tests.pytorch_tests.tpc_pytorch import get_pytorch_test_tpc_dict
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 import model_compression_toolkit as mct
@@ -47,7 +47,7 @@ def __init__(self, unit_test, num_calibration_iter=1):
         super().__init__(unit_test, num_calibration_iter=num_calibration_iter)
 
     def get_tpc(self):
-        return get_pytorch_test_tpc_dict(tp_model=get_tp_model(),
+        return get_pytorch_test_tpc_dict(tpc=get_tpc(),
                                          test_name='mixed_precision_model',
                                          ftp_name='mixed_precision_pytorch_test')
 
@@ -162,7 +162,7 @@ def get_tpc(self):
             base_config=two_bit_cfg,
         )
 
-        tp_model = schema.TargetPlatformCapabilities(
+        tpc = schema.TargetPlatformCapabilities(
             default_qco=weight_fixed_cfg,
             tpc_minor_version=None,
             tpc_patch_version=None,
@@ -171,7 +171,7 @@ def get_tpc(self):
                           schema.OperatorsSet(name="Weights_fixed", qc_options=weight_fixed_cfg)]),
             name="mp_part_weights_layers_test")
 
-        return {'mixed_precision_model': tp_model}
+        return {'mixed_precision_model': tpc}
 
     def create_feature_network(self, input_shape):
         class ConvLinearModel(torch.nn.Module):
@@ -235,7 +235,7 @@ def __init__(self, unit_test):
     def get_fw_hw_model(self):
         base_config, _, default_config = get_op_quantization_configs()
         return get_pytorch_test_tpc_dict(
-            tp_model=generate_mixed_precision_test_tp_model(
+            tpc=generate_mixed_precision_test_tpc(
                 base_cfg=base_config.clone_and_edit(enable_activation_quantization=False),
                 default_config=default_config,
                 mp_bitwidth_candidates_list=[(8, 8), (4, 8), (2, 8)]),
@@ -319,7 +319,7 @@ def get_tpc(self):
             base_config=cfg,
         )
 
-        tp_model = TargetPlatformCapabilities(
+        tpc = TargetPlatformCapabilities(
             default_qco=QuantizationConfigOptions(quantization_configurations=tuple([cfg]), base_config=cfg),
             tpc_minor_version=None,
             tpc_patch_version=None,
@@ -329,7 +329,7 @@ def get_tpc(self):
                 OperatorsSet(name="Weights", qc_options=weight_mixed_cfg)]),
             name="mp_weights_conf_act_test")
 
-        return {'mixed_precision_model': tp_model}
+        return {'mixed_precision_model': tpc}
 
     def create_feature_network(self, input_shape):
         return MixedPrecisionWeightsTestNet(input_shape)
diff --git a/tests/pytorch_tests/model_tests/feature_models/permute_substitution_test.py b/tests/pytorch_tests/model_tests/feature_models/permute_substitution_test.py
index ebf6ed68b..d65b1c156 100644
--- a/tests/pytorch_tests/model_tests/feature_models/permute_substitution_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/permute_substitution_test.py
@@ -17,7 +17,7 @@
 import model_compression_toolkit as mct
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor, set_model
 from tests.pytorch_tests.model_tests.base_pytorch_feature_test import BasePytorchFeatureNetworkTest
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 
 
@@ -30,11 +30,11 @@ def __init__(self, unit_test):
         super().__init__(unit_test=unit_test)
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 32,
+        tp = generate_test_tpc({'weights_n_bits': 32,
                                      'activation_n_bits': 32,
                                      'enable_weights_quantization': False,
                                      'enable_activation_quantization': False})
-        return generate_pytorch_tpc(name="permute_substitution_test", tp_model=tp)
+        return generate_pytorch_tpc(name="permute_substitution_test", tpc=tp)
 
     def get_quantization_config(self):
         return mct.core.QuantizationConfig(mct.core.QuantizationErrorMethod.NOCLIPPING,
diff --git a/tests/pytorch_tests/model_tests/feature_models/qat_test.py b/tests/pytorch_tests/model_tests/feature_models/qat_test.py
index 1d68ad95e..c0c697b41 100644
--- a/tests/pytorch_tests/model_tests/feature_models/qat_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/qat_test.py
@@ -42,8 +42,8 @@
     BasePytorchActivationTrainableQuantizer
 from model_compression_toolkit.trainable_infrastructure.pytorch.activation_quantizers.ste.symmetric_ste import \
     STESymmetricActivationTrainableQuantizer
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model, \
-    generate_tp_model_with_activation_mp
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc, \
+    generate_tpc_with_activation_mp
 from tests.pytorch_tests.model_tests.base_pytorch_feature_test import BasePytorchFeatureNetworkTest
 from tests.pytorch_tests.tpc_pytorch import get_mp_activation_pytorch_tpc_dict
 
@@ -111,7 +111,7 @@ def __init__(self, unit_test, weight_bits=2, activation_bits=4,
     def get_tpc(self):
         return generate_pytorch_tpc(
             name="qat_test",
-            tp_model=generate_test_tp_model({'weights_n_bits': self.weight_bits,
+            tpc=generate_test_tpc({'weights_n_bits': self.weight_bits,
                                              'activation_n_bits': self.activation_bits,
                                              'weights_quantization_method': self.weights_quantization_method,
                                              'activation_quantization_method': self.activation_quantization_method}))
@@ -227,7 +227,7 @@ def __init__(self, unit_test, finalize=False):
     def get_tpc(self):
         return generate_pytorch_tpc(
             name="qat_test",
-            tp_model=generate_test_tp_model({'weights_n_bits': self.weight_bits,
+            tpc=generate_test_tpc({'weights_n_bits': self.weight_bits,
                                              'activation_n_bits': self.activation_bits,
                                              'weights_quantization_method': self.weights_quantization_method,
                                              'activation_quantization_method': self.activation_quantization_method}))
@@ -260,7 +260,7 @@ def __init__(self, unit_test):
     def get_tpc(self):
         base_config, _, default_config = get_op_quantization_configs()
         return get_mp_activation_pytorch_tpc_dict(
-            tpc_model=generate_tp_model_with_activation_mp(
+            tpc_model=generate_tpc_with_activation_mp(
                 base_cfg=base_config,
                 default_config=default_config,
                 mp_bitwidth_candidates_list=[(8, 8), (8, 4), (8, 2),
@@ -306,7 +306,7 @@ def __init__(self, unit_test):
     def get_tpc(self):
         base_config, _, default_config = get_op_quantization_configs()
         return get_mp_activation_pytorch_tpc_dict(
-            tpc_model=generate_tp_model_with_activation_mp(
+            tpc_model=generate_tpc_with_activation_mp(
                 base_cfg=base_config,
                 default_config=default_config,
                 mp_bitwidth_candidates_list=[(8, 8), (8, 4), (8, 2),
diff --git a/tests/pytorch_tests/model_tests/feature_models/relu_bound_test.py b/tests/pytorch_tests/model_tests/feature_models/relu_bound_test.py
index 205a5df83..345742109 100644
--- a/tests/pytorch_tests/model_tests/feature_models/relu_bound_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/relu_bound_test.py
@@ -15,7 +15,7 @@
 import torch
 
 from model_compression_toolkit.core import QuantizationConfig, QuantizationErrorMethod
-from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_tp_model
+from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_tpc
 from tests.pytorch_tests.tpc_pytorch import get_pytorch_test_tpc_dict
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 from torch.nn import Conv2d, ReLU, ReLU6, Hardtanh
@@ -101,7 +101,7 @@ def create_inputs_shape(self):
         return [[self.val_batch_size, 3, 32, 32]]
 
     def get_tpc(self):
-        return get_pytorch_test_tpc_dict(tp_model=get_tp_model(),
+        return get_pytorch_test_tpc_dict(tpc=get_tpc(),
                                          test_name='8bit_relu_bound',
                                          ftp_name='relu_bound_pytorch_test')
 
@@ -142,7 +142,7 @@ def create_inputs_shape(self):
         return [[self.val_batch_size, 3, 32, 32]]
 
     def get_tpc(self):
-        return get_pytorch_test_tpc_dict(tp_model=get_tp_model(),
+        return get_pytorch_test_tpc_dict(tpc=get_tpc(),
                                          test_name='8bit_relu_bound',
                                          ftp_name='relu_bound_pytorch_test')
 
diff --git a/tests/pytorch_tests/model_tests/feature_models/reshape_substitution_test.py b/tests/pytorch_tests/model_tests/feature_models/reshape_substitution_test.py
index b93a9abfb..d0b22afd7 100644
--- a/tests/pytorch_tests/model_tests/feature_models/reshape_substitution_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/reshape_substitution_test.py
@@ -17,7 +17,7 @@
 import model_compression_toolkit as mct
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor, set_model
 from tests.pytorch_tests.model_tests.base_pytorch_feature_test import BasePytorchFeatureNetworkTest
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 
 
@@ -30,11 +30,11 @@ def __init__(self, unit_test):
         super().__init__(unit_test=unit_test)
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 32,
+        tp = generate_test_tpc({'weights_n_bits': 32,
                                      'activation_n_bits': 32,
                                      'enable_weights_quantization': False,
                                      'enable_activation_quantization': False})
-        return generate_pytorch_tpc(name="permute_substitution_test", tp_model=tp)
+        return generate_pytorch_tpc(name="permute_substitution_test", tpc=tp)
 
     def get_quantization_config(self):
         return mct.core.QuantizationConfig(mct.core.QuantizationErrorMethod.NOCLIPPING,
diff --git a/tests/pytorch_tests/model_tests/feature_models/residual_collapsing_test.py b/tests/pytorch_tests/model_tests/feature_models/residual_collapsing_test.py
index 94adb0d0b..dc69ddf2a 100644
--- a/tests/pytorch_tests/model_tests/feature_models/residual_collapsing_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/residual_collapsing_test.py
@@ -18,7 +18,7 @@
 import model_compression_toolkit as mct
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor, torch_tensor_to_numpy, set_model
 from tests.pytorch_tests.model_tests.base_pytorch_feature_test import BasePytorchFeatureNetworkTest
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 from tests.common_tests.helpers.tensors_compare import cosine_similarity
 
@@ -31,11 +31,11 @@ def __init__(self, unit_test):
         super().__init__(unit_test=unit_test, input_shape=(3, 16, 16))
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_n_bits': 32,
+        tp = generate_test_tpc({'weights_n_bits': 32,
                                      'activation_n_bits': 32,
                                      'enable_weights_quantization': False,
                                      'enable_activation_quantization': False})
-        return generate_pytorch_tpc(name="linear_collapsing_test", tp_model=tp)
+        return generate_pytorch_tpc(name="linear_collapsing_test", tpc=tp)
 
     def get_quantization_config(self):
         return mct.core.QuantizationConfig(mct.core.QuantizationErrorMethod.NOCLIPPING,
diff --git a/tests/pytorch_tests/model_tests/feature_models/scale_equalization_test.py b/tests/pytorch_tests/model_tests/feature_models/scale_equalization_test.py
index 9a91b48c7..56a07f407 100644
--- a/tests/pytorch_tests/model_tests/feature_models/scale_equalization_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/scale_equalization_test.py
@@ -21,7 +21,7 @@
 import model_compression_toolkit
 from model_compression_toolkit.core.common.substitutions.scale_equalization import fixed_second_moment_after_relu, \
     fixed_mean_after_relu
-from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_tp_model
+from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_tpc
 from model_compression_toolkit.core.pytorch.utils import set_model
 from tests.pytorch_tests.tpc_pytorch import get_pytorch_test_tpc_dict
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
@@ -47,7 +47,7 @@ def create_inputs_shape(self):
         return [[self.val_batch_size, 3, 32, 32]]
 
     def get_tpc(self):
-        return get_pytorch_test_tpc_dict(tp_model=get_tp_model(),
+        return get_pytorch_test_tpc_dict(tpc=get_tpc(),
                                          test_name='8bit_scale_equalization',
                                          ftp_name='8bit_scale_equalization_pytorch_test')
 
diff --git a/tests/pytorch_tests/model_tests/feature_models/second_moment_correction_test.py b/tests/pytorch_tests/model_tests/feature_models/second_moment_correction_test.py
index 0d949de35..1ac400c7c 100644
--- a/tests/pytorch_tests/model_tests/feature_models/second_moment_correction_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/second_moment_correction_test.py
@@ -36,7 +36,7 @@
 from model_compression_toolkit.core.runner import core_runner
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2pytorch import \
     AttachTpcToPytorch
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 from tests.pytorch_tests.tpc_pytorch import get_pytorch_test_tpc_dict
 
@@ -89,8 +89,8 @@ def create_inputs_shape(self):
         return [[self.val_batch_size, 1, 32, 32]]
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'weights_quantization_method': QuantizationMethod.SYMMETRIC})
-        return get_pytorch_test_tpc_dict(tp_model=tp,
+        tp = generate_test_tpc({'weights_quantization_method': QuantizationMethod.SYMMETRIC})
+        return get_pytorch_test_tpc_dict(tpc=tp,
                                          test_name='8bit_second_moment_correction',
                                          ftp_name='second_moment_correction_pytorch_test')
 
@@ -316,13 +316,13 @@ def representative_data_gen():
             tpc = tpc_dict[model_name]
 
             attach2pytorch = AttachTpcToPytorch()
-            tpc = attach2pytorch.attach(tpc)
+            fqc = attach2pytorch.attach(tpc)
 
             tg, graph_after_second_moment_correction = self.prepare_graph(model_float,
                                                                           representative_data_gen,
                                                                           core_config=core_config,
                                                                           fw_info=DEFAULT_PYTORCH_INFO,
-                                                                          target_platform_capabilities=tpc)
+                                                                          framework_quantization_capabilities=fqc)
             for node in graph_after_second_moment_correction.nodes:
                 if node.layer_class == torch.nn.BatchNorm2d:
                     bf_second_moment_node = tg.find_node_by_name(node.name)[0]
@@ -350,7 +350,7 @@ def prepare_graph(self,
                       representative_data_gen: Callable,
                       core_config: CoreConfig = CoreConfig(),
                       fw_info: FrameworkInfo = DEFAULT_PYTORCH_INFO,
-                      target_platform_capabilities: FrameworkQuantizationCapabilities = DEFAULT_PYTORCH_INFO) -> \
+                      framework_quantization_capabilities: FrameworkQuantizationCapabilities = DEFAULT_PYTORCH_INFO) -> \
             Tuple[Graph, Graph]:
 
         tb_w = init_tensorboard_writer(fw_info)
@@ -363,7 +363,7 @@ def prepare_graph(self,
                                                   core_config=core_config,
                                                   fw_info=fw_info,
                                                   fw_impl=fw_impl,
-                                                  tpc=target_platform_capabilities,
+                                                  fqc=framework_quantization_capabilities,
                                                   tb_w=tb_w)
         graph_to_apply_second_moment = copy.deepcopy(tg)
         semi_quantized_model = quantized_model_builder_for_second_moment_correction(graph_to_apply_second_moment,
diff --git a/tests/pytorch_tests/model_tests/feature_models/shift_negative_activation_test.py b/tests/pytorch_tests/model_tests/feature_models/shift_negative_activation_test.py
index 7145b248f..49bae3568 100644
--- a/tests/pytorch_tests/model_tests/feature_models/shift_negative_activation_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/shift_negative_activation_test.py
@@ -17,7 +17,7 @@
 
 import model_compression_toolkit as mct
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor
-from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_tp_model
+from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_tpc
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 from tests.pytorch_tests.tpc_pytorch import get_pytorch_test_tpc_dict
 
@@ -57,7 +57,7 @@ def generate_inputs(input_shapes):
         return i
 
     def get_tpc(self):
-        return get_pytorch_test_tpc_dict(tp_model=get_tp_model(),
+        return get_pytorch_test_tpc_dict(tpc=get_tpc(),
                                          test_name='all_8bit',
                                          ftp_name='sn_pytorch_test')
 
diff --git a/tests/pytorch_tests/model_tests/feature_models/symmetric_activation_test.py b/tests/pytorch_tests/model_tests/feature_models/symmetric_activation_test.py
index 3ecf16b1f..b91a389e4 100644
--- a/tests/pytorch_tests/model_tests/feature_models/symmetric_activation_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/symmetric_activation_test.py
@@ -22,7 +22,7 @@
 from model_compression_toolkit.core.common.user_info import UserInformation
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 
 """
@@ -46,11 +46,11 @@ def __init__(self, unit_test):
         self.const_input = 3
 
     def get_tpc(self):
-        tp = generate_test_tp_model({
+        tp = generate_test_tpc({
             'activation_quantization_method': QuantizationMethod.SYMMETRIC,
             "enable_weights_quantization": False,
             'activation_n_bits': 8})
-        return {'act_8bit': generate_pytorch_tpc(name="symmetric_layer_test", tp_model=tp)}
+        return {'act_8bit': generate_pytorch_tpc(name="symmetric_layer_test", tpc=tp)}
 
     def get_core_configs(self):
         qc = mct.core.QuantizationConfig(mct.core.QuantizationErrorMethod.NOCLIPPING,
diff --git a/tests/pytorch_tests/model_tests/feature_models/test_softmax_shift.py b/tests/pytorch_tests/model_tests/feature_models/test_softmax_shift.py
index ace9b5907..975d539bc 100644
--- a/tests/pytorch_tests/model_tests/feature_models/test_softmax_shift.py
+++ b/tests/pytorch_tests/model_tests/feature_models/test_softmax_shift.py
@@ -18,7 +18,7 @@
 from torch.nn.functional import softmax
 
 import model_compression_toolkit
-from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_tp_model
+from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_tpc
 from model_compression_toolkit.core.pytorch.utils import set_model
 from tests.pytorch_tests.tpc_pytorch import get_pytorch_test_tpc_dict
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
@@ -36,7 +36,7 @@ def create_inputs_shape(self):
         return [[self.val_batch_size, 3, 32, 32]]
 
     def get_tpc(self):
-        return get_pytorch_test_tpc_dict(tp_model=get_tp_model(),
+        return get_pytorch_test_tpc_dict(tpc=get_tpc(),
                                          test_name='8bit_softmax_shift',
                                          ftp_name='softmax_shift_pytorch_test')
 
diff --git a/tests/pytorch_tests/model_tests/feature_models/tpc_test.py b/tests/pytorch_tests/model_tests/feature_models/tpc_test.py
index 1a51c1312..3510fd80f 100644
--- a/tests/pytorch_tests/model_tests/feature_models/tpc_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/tpc_test.py
@@ -20,7 +20,7 @@
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor, torch_tensor_to_numpy, set_model
 from tests.pytorch_tests.model_tests.base_pytorch_feature_test import BasePytorchFeatureNetworkTest
 from tests.common_tests.helpers.tensors_compare import cosine_similarity
-from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TPC
 from model_compression_toolkit.constants import PYTORCH
 from mct_quantizers import PytorchQuantizationWrapper
 from mct_quantizers.pytorch.metadata import add_metadata, get_metadata, add_onnx_metadata, get_onnx_metadata
@@ -45,8 +45,8 @@ def __init__(self, tpc_name: str, *args, **kwargs):
         self.tpc_name = tpc_name
 
     def get_tpc(self):
-        tp_model_name, tp_version = self.tpc_name.split('.')
-        return mct.get_target_platform_capabilities(PYTORCH, tp_model_name, tp_version)
+        tpc_name, tp_version = self.tpc_name.split('.')
+        return mct.get_target_platform_capabilities(PYTORCH, tpc_name, tp_version)
 
     def create_networks(self):
         return DummyNet()
diff --git a/tests/pytorch_tests/model_tests/feature_models/uniform_activation_test.py b/tests/pytorch_tests/model_tests/feature_models/uniform_activation_test.py
index d391bf154..b55eec63e 100644
--- a/tests/pytorch_tests/model_tests/feature_models/uniform_activation_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/uniform_activation_test.py
@@ -20,7 +20,7 @@
 from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.core.common.user_info import UserInformation
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 
 """
@@ -33,10 +33,10 @@ def __init__(self, unit_test):
         super().__init__(unit_test)
 
     def get_tpc(self):
-        tp = generate_test_tp_model({
+        tp = generate_test_tpc({
             'activation_quantization_method': QuantizationMethod.UNIFORM,
             'activation_n_bits': 2})
-        return {'act_2bit': generate_pytorch_tpc(name="uniform_layer_test", tp_model=tp)}
+        return {'act_2bit': generate_pytorch_tpc(name="uniform_layer_test", tpc=tp)}
 
     def get_core_configs(self):
         qc = mct.core.QuantizationConfig(mct.core.QuantizationErrorMethod.NOCLIPPING,
diff --git a/tests/pytorch_tests/model_tests/test_feature_models_runner.py b/tests/pytorch_tests/model_tests/test_feature_models_runner.py
index 53e7cfd97..81b60e410 100644
--- a/tests/pytorch_tests/model_tests/test_feature_models_runner.py
+++ b/tests/pytorch_tests/model_tests/test_feature_models_runner.py
@@ -763,9 +763,9 @@ def test_metadata(self):
         MetadataTest(self).run_test()
 
     def test_torch_tpcs(self):
-        TpcTest(f'{C.IMX500_TP_MODEL}.v1', self).run_test()
-        TpcTest(f'{C.TFLITE_TP_MODEL}.v1', self).run_test()
-        TpcTest(f'{C.QNNPACK_TP_MODEL}.v1', self).run_test()
+        TpcTest(f'{C.IMX500_TPC}.v1', self).run_test()
+        TpcTest(f'{C.TFLITE_TPC}.v1', self).run_test()
+        TpcTest(f'{C.QNNPACK_TPC}.v1', self).run_test()
 
     def test_16bit_activations(self):
         Activation16BitTest(self).run_test()
diff --git a/tests/pytorch_tests/pruning_tests/feature_networks/network_tests/conv2d_pruning_test.py b/tests/pytorch_tests/pruning_tests/feature_networks/network_tests/conv2d_pruning_test.py
index a8b92cd01..e3dc56987 100644
--- a/tests/pytorch_tests/pruning_tests/feature_networks/network_tests/conv2d_pruning_test.py
+++ b/tests/pytorch_tests/pruning_tests/feature_networks/network_tests/conv2d_pruning_test.py
@@ -16,7 +16,7 @@
 import model_compression_toolkit as mct
 from model_compression_toolkit.core.pytorch.utils import torch_tensor_to_numpy
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 import numpy as np
 
 from tests.common_tests.pruning.constant_importance_metric import add_const_importance_metric, ConstImportanceMetric
diff --git a/tests/pytorch_tests/pruning_tests/feature_networks/network_tests/conv2dtranspose_conv2d_pruning_test.py b/tests/pytorch_tests/pruning_tests/feature_networks/network_tests/conv2dtranspose_conv2d_pruning_test.py
index 1cd46a5c4..37ed3bdc4 100644
--- a/tests/pytorch_tests/pruning_tests/feature_networks/network_tests/conv2dtranspose_conv2d_pruning_test.py
+++ b/tests/pytorch_tests/pruning_tests/feature_networks/network_tests/conv2dtranspose_conv2d_pruning_test.py
@@ -16,7 +16,7 @@
 import model_compression_toolkit as mct
 from model_compression_toolkit.core.pytorch.utils import torch_tensor_to_numpy
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 import numpy as np
 
 from tests.common_tests.pruning.constant_importance_metric import add_const_importance_metric, ConstImportanceMetric
diff --git a/tests/pytorch_tests/pruning_tests/feature_networks/network_tests/conv2dtranspose_pruning_test.py b/tests/pytorch_tests/pruning_tests/feature_networks/network_tests/conv2dtranspose_pruning_test.py
index e04f1422c..f9a3b6314 100644
--- a/tests/pytorch_tests/pruning_tests/feature_networks/network_tests/conv2dtranspose_pruning_test.py
+++ b/tests/pytorch_tests/pruning_tests/feature_networks/network_tests/conv2dtranspose_pruning_test.py
@@ -16,7 +16,7 @@
 import model_compression_toolkit as mct
 from model_compression_toolkit.core.pytorch.utils import torch_tensor_to_numpy
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 import numpy as np
 
 from tests.common_tests.pruning.constant_importance_metric import add_const_importance_metric, ConstImportanceMetric
diff --git a/tests/pytorch_tests/pruning_tests/feature_networks/network_tests/linear_pruning_test.py b/tests/pytorch_tests/pruning_tests/feature_networks/network_tests/linear_pruning_test.py
index 1623d558b..6a312df52 100644
--- a/tests/pytorch_tests/pruning_tests/feature_networks/network_tests/linear_pruning_test.py
+++ b/tests/pytorch_tests/pruning_tests/feature_networks/network_tests/linear_pruning_test.py
@@ -16,7 +16,7 @@
 import model_compression_toolkit as mct
 from model_compression_toolkit.core.pytorch.utils import torch_tensor_to_numpy
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 import numpy as np
 
 from tests.common_tests.pruning.constant_importance_metric import add_const_importance_metric, ConstImportanceMetric
diff --git a/tests/pytorch_tests/pruning_tests/feature_networks/pruning_pytorch_feature_test.py b/tests/pytorch_tests/pruning_tests/feature_networks/pruning_pytorch_feature_test.py
index 657995f1e..8ede94a66 100644
--- a/tests/pytorch_tests/pruning_tests/feature_networks/pruning_pytorch_feature_test.py
+++ b/tests/pytorch_tests/pruning_tests/feature_networks/pruning_pytorch_feature_test.py
@@ -21,7 +21,7 @@
 from model_compression_toolkit.core.pytorch.pytorch_device_config import get_working_device
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor, torch_tensor_to_numpy
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.pytorch_tests.model_tests.base_pytorch_feature_test import BasePytorchFeatureNetworkTest
 from tests.pytorch_tests.utils import count_model_prunable_params
 
@@ -46,8 +46,8 @@ def get_pruning_config(self):
         return PruningConfig(num_score_approximations=2)
 
     def get_tpc(self):
-        tp = generate_test_tp_model({'simd_size': self.simd})
-        return generate_pytorch_tpc(name="simd_test", tp_model=tp)
+        tp = generate_test_tpc({'simd_size': self.simd})
+        return generate_pytorch_tpc(name="simd_test", tpc=tp)
 
     def get_resource_utilization(self, dense_model_num_params, model):
         if not self.use_bn and torch.nn.BatchNorm2d in [type(m) for m in model.modules()]:
diff --git a/tests/pytorch_tests/tpc_pytorch.py b/tests/pytorch_tests/tpc_pytorch.py
index 055a1cffe..d4aac9470 100644
--- a/tests/pytorch_tests/tpc_pytorch.py
+++ b/tests/pytorch_tests/tpc_pytorch.py
@@ -14,22 +14,22 @@
 # ==============================================================================
 
 import model_compression_toolkit as mct
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 
 tp = mct.target_platform
 
 
-def get_pytorch_test_tpc_dict(tp_model, test_name, ftp_name):
+def get_pytorch_test_tpc_dict(tpc, test_name, ftp_name):
     return {
-        test_name: tp_model
+        test_name: tpc
     }
 
 def get_activation_quantization_disabled_pytorch_tpc(name):
-    tp = generate_test_tp_model({'enable_activation_quantization': False})
+    tp = generate_test_tpc({'enable_activation_quantization': False})
     return get_pytorch_test_tpc_dict(tp, name, name)
 
 def get_weights_quantization_disabled_pytorch_tpc(name):
-    tp = generate_test_tp_model({'enable_weights_quantization': False})
+    tp = generate_test_tpc({'enable_weights_quantization': False})
     return get_pytorch_test_tpc_dict(tp, name, name)
 
 
diff --git a/tests/pytorch_tests/xquant_tests/test_xquant_end2end.py b/tests/pytorch_tests/xquant_tests/test_xquant_end2end.py
index e9f8f01d8..483a14978 100644
--- a/tests/pytorch_tests/xquant_tests/test_xquant_end2end.py
+++ b/tests/pytorch_tests/xquant_tests/test_xquant_end2end.py
@@ -34,14 +34,14 @@
 import model_compression_toolkit as mct
 from mct_quantizers import PytorchQuantizationWrapper
 from model_compression_toolkit.constants import PYTORCH
-from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TPC
 from model_compression_toolkit.xquant.common.similarity_functions import DEFAULT_SIMILARITY_METRICS_NAMES
 from model_compression_toolkit.xquant.common.xquant_config import XQuantConfig
 from model_compression_toolkit.xquant.pytorch.facade_xquant_report import xquant_report_pytorch_experimental
 from model_compression_toolkit.xquant.common.constants import OUTPUT_SIMILARITY_METRICS_REPR, \
     OUTPUT_SIMILARITY_METRICS_VAL, INTERMEDIATE_SIMILARITY_METRICS_REPR, INTERMEDIATE_SIMILARITY_METRICS_VAL, \
     XQUANT_REPR, XQUANT_VAL, CUT_MEMORY_ELEMENTS, CUT_TOTAL_SIZE
-from tests.common_tests.helpers.tpcs_for_tests.v2.tp_model import get_tp_model
+from tests.common_tests.helpers.tpcs_for_tests.v2.tpc import get_tpc
 
 
 def random_data_gen(shape=(3, 8, 8), use_labels=False, num_inputs=1, batch_size=2, num_iter=2):
@@ -73,7 +73,7 @@ def get_core_config(self):
         return mct.core.CoreConfig(debug_config=mct.core.DebugConfig(simulate_scheduler=True))
 
     def get_tpc(self):
-        return get_tp_model()
+        return get_tpc()
 
     def get_model_to_test(self):
         class BaseModelTest(torch.nn.Module):
diff --git a/tests/test_suite.py b/tests/test_suite.py
index 519c75f8f..11ce0f22a 100644
--- a/tests/test_suite.py
+++ b/tests/test_suite.py
@@ -28,7 +28,7 @@
 from tests.common_tests.function_tests.test_resource_utilization_object import TestResourceUtilizationObject
 from tests.common_tests.function_tests.test_threshold_selection import TestThresholdSelection
 from tests.common_tests.test_doc_examples import TestCommonDocsExamples
-from tests.common_tests.test_tp_model import TargetPlatformModelingTest, OpsetTest, QCOptionsTest, FusingTest, \
+from tests.common_tests.test_tpc import TargetPlatformModelingTest, OpsetTest, QCOptionsTest, FusingTest, \
     TPModelInputOutputTests
 
 found_tf = importlib.util.find_spec("tensorflow") is not None
@@ -62,7 +62,7 @@
         TestSymmetricThresholdSelectionWeights
     from tests.keras_tests.function_tests.test_uniform_quantize_tensor import TestUniformQuantizeTensor
     from tests.keras_tests.function_tests.test_uniform_range_selection_weights import TestUniformRangeSelectionWeights
-    from tests.keras_tests.non_parallel_tests.test_keras_tp_model import TestKerasTPModel
+    from tests.keras_tests.non_parallel_tests.test_keras_tpc import TestKerasTPModel
     from tests.keras_tests.function_tests.test_sensitivity_metric_interest_points import \
         TestSensitivityMetricInterestPoints
     from tests.keras_tests.function_tests.test_weights_activation_split_substitution import TestWeightsActivationSplit
@@ -99,7 +99,7 @@
     from tests.pytorch_tests.model_tests.test_feature_models_runner import FeatureModelsTestRunner
     # from tests.pytorch_tests.model_tests.test_models_runner import ModelTest
     from tests.pytorch_tests.function_tests.test_function_runner import FunctionTestRunner
-    from tests.pytorch_tests.function_tests.test_pytorch_tp_model import TestPytorchTPModel
+    from tests.pytorch_tests.function_tests.test_pytorch_tpc import TestPytorchTPModel
     from tests.pytorch_tests.trainable_infrastructure_tests.test_pytorch_trainable_infra_runner import PytorchTrainableInfrastructureTestRunner
     from tests.pytorch_tests.function_tests.test_gptq_soft_quantizer import TestGPTQSoftQuantizer as pytorch_gptq_soft_quantier_test
     from tests.pytorch_tests.function_tests.test_activation_quantization_holder_gptq import \

From b1cc0d1299051068dd6e3d08d28ff95c33a504a2 Mon Sep 17 00:00:00 2001
From: liord <lior.dikstein@altair-semi.com>
Date: Sun, 12 Jan 2025 15:04:36 +0200
Subject: [PATCH 07/18] Fix test

---
 tests/pytorch_tests/model_tests/feature_models/matmul_test.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/pytorch_tests/model_tests/feature_models/matmul_test.py b/tests/pytorch_tests/model_tests/feature_models/matmul_test.py
index c457a9319..f2fe4ce74 100644
--- a/tests/pytorch_tests/model_tests/feature_models/matmul_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/matmul_test.py
@@ -15,7 +15,7 @@
 import torch
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 
 """
 This test checks the MatMul substitution function.
@@ -60,7 +60,7 @@ def get_tpc(self):
         return {
             'no_quantization': generate_pytorch_tpc(
                 name="no_quant_pytorch_test",
-                tp_model=generate_test_tp_model(
+                tpc=generate_test_tpc(
                     {
                         'weights_n_bits': 32,
                         'activation_n_bits': 32,

From 77f28aa485c9f5f718b64fc7c46b5a5f76c40078 Mon Sep 17 00:00:00 2001
From: liord <lior.dikstein@altair-semi.com>
Date: Mon, 13 Jan 2025 10:39:57 +0200
Subject: [PATCH 08/18] Remove folder "target_platform"

---
 .../core/common/fusion/layer_fusing.py        |  2 +-
 .../keras/resource_utilization_data_facade.py |  4 ++--
 .../resource_utilization_data_facade.py       |  7 +++---
 .../gptq/keras/quantization_facade.py         |  4 ++--
 .../gptq/pytorch/quantization_facade.py       |  7 +++---
 .../pruning/keras/pruning_facade.py           |  4 ++--
 .../pruning/pytorch/pruning_facade.py         |  7 +++---
 .../ptq/keras/quantization_facade.py          |  4 ++--
 .../ptq/pytorch/quantization_facade.py        |  7 +++---
 .../qat/keras/quantization_facade.py          |  6 ++---
 .../qat/pytorch/quantization_facade.py        | 10 +++-----
 .../target_platform_capabilities/constants.py |  8 +++----
 .../target_platform/__init__.py               | 23 -------------------
 .../targetplatform2framework/__init__.py      |  4 +---
 .../targetplatform2framework/attach2fw.py     |  0
 .../targetplatform2framework/attach2keras.py  |  0
 .../attach2pytorch.py                         |  0
 .../attribute_filter.py                       |  0
 .../targetplatform2framework/current_tpc.py   |  0
 .../framework_quantization_capabilities.py    |  2 +-
 ...ork_quantization_capabilities_component.py |  0
 .../layer_filter_params.py                    |  0
 .../operations_to_layers.py                   |  0
 .../get_target_platform_capabilities.py       | 12 +++++-----
 .../tpc_models/imx500_tpc/v1/tpc.py           |  4 ++--
 .../tpc_models/qnnpack_tpc/v1/tpc.py          |  4 ++--
 .../tpc_models/tflite_tpc/v1/tpc.py           |  4 ++--
 .../xquant/keras/keras_report_utils.py        |  4 ++--
 .../xquant/pytorch/pytorch_report_utils.py    |  6 ++---
 .../helpers/tpcs_for_tests/v1/tpc.py          |  4 ++--
 .../helpers/tpcs_for_tests/v1_lut/tpc.py      |  4 ++--
 .../helpers/tpcs_for_tests/v1_pot/tpc.py      |  4 ++--
 .../helpers/tpcs_for_tests/v2/tpc.py          |  4 ++--
 .../helpers/tpcs_for_tests/v2_lut/tpc.py      |  4 ++--
 .../helpers/tpcs_for_tests/v3/tpc.py          |  4 ++--
 .../helpers/tpcs_for_tests/v3_lut/tpc.py      |  4 ++--
 .../helpers/tpcs_for_tests/v4/tpc.py          |  4 ++--
 .../test_networks_runner_float.py             |  4 ++--
 .../keras_fake_quant_exporter_base_test.py    |  4 ++--
 .../tflite_fake_quant_exporter_base_test.py   |  4 ++--
 .../base_keras_feature_test.py                |  4 ++--
 .../feature_networks/activation_16bit_test.py |  2 +-
 .../feature_networks/compute_max_cut_test.py  |  2 +-
 .../const_representation_test.py              |  4 ++--
 .../conv_func_substitutions_test.py           |  2 +-
 .../feature_networks/metadata_test.py         |  2 +-
 .../second_moment_correction_test.py          |  4 ++--
 .../test_features_runner.py                   |  6 ++---
 ...test_export_keras_fully_quantized_model.py |  4 ++--
 .../non_parallel_tests/test_keras_tpc.py      | 18 +++++++--------
 .../test_tensorboard_writer.py                |  2 +-
 .../xquant_tests/test_xquant_end2end.py       |  2 +-
 .../base_pytorch_export_test.py               |  4 ++--
 .../function_tests/layer_fusing_test.py       |  2 +-
 .../set_layer_to_bitwidth_test.py             |  2 +-
 ...est_activation_quantization_holder_gptq.py |  2 +-
 ...st_export_pytorch_fully_quantized_model.py |  4 ++--
 .../test_hessian_info_calculator.py           |  3 +--
 .../function_tests/test_hessian_service.py    |  4 +---
 .../function_tests/test_pytorch_tpc.py        | 18 +++++++--------
 .../test_quant_config_filtering.py            |  2 +-
 ...t_sensitivity_eval_non_supported_output.py |  2 +-
 .../model_tests/base_pytorch_feature_test.py  |  4 ++--
 .../feature_models/activation_16bit_test.py   |  2 +-
 .../feature_models/compute_max_cut_test.py    |  2 +-
 .../feature_models/metadata_test.py           |  2 +-
 .../second_moment_correction_test.py          |  4 ++--
 .../model_tests/feature_models/tpc_test.py    |  2 +-
 .../model_tests/test_feature_models_runner.py |  6 ++---
 .../xquant_tests/test_xquant_end2end.py       |  2 +-
 70 files changed, 130 insertions(+), 166 deletions(-)
 delete mode 100644 model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py
 rename model_compression_toolkit/target_platform_capabilities/{target_platform => }/targetplatform2framework/__init__.py (83%)
 rename model_compression_toolkit/target_platform_capabilities/{target_platform => }/targetplatform2framework/attach2fw.py (100%)
 rename model_compression_toolkit/target_platform_capabilities/{target_platform => }/targetplatform2framework/attach2keras.py (100%)
 rename model_compression_toolkit/target_platform_capabilities/{target_platform => }/targetplatform2framework/attach2pytorch.py (100%)
 rename model_compression_toolkit/target_platform_capabilities/{target_platform => }/targetplatform2framework/attribute_filter.py (100%)
 rename model_compression_toolkit/target_platform_capabilities/{target_platform => }/targetplatform2framework/current_tpc.py (100%)
 rename model_compression_toolkit/target_platform_capabilities/{target_platform => }/targetplatform2framework/framework_quantization_capabilities.py (99%)
 rename model_compression_toolkit/target_platform_capabilities/{target_platform => }/targetplatform2framework/framework_quantization_capabilities_component.py (100%)
 rename model_compression_toolkit/target_platform_capabilities/{target_platform => }/targetplatform2framework/layer_filter_params.py (100%)
 rename model_compression_toolkit/target_platform_capabilities/{target_platform => }/targetplatform2framework/operations_to_layers.py (100%)

diff --git a/model_compression_toolkit/core/common/fusion/layer_fusing.py b/model_compression_toolkit/core/common/fusion/layer_fusing.py
index 98303b207..e76aad1a3 100644
--- a/model_compression_toolkit/core/common/fusion/layer_fusing.py
+++ b/model_compression_toolkit/core/common/fusion/layer_fusing.py
@@ -17,7 +17,7 @@
 from model_compression_toolkit.core.common.graph.base_graph import Graph
 from model_compression_toolkit.core.common.graph.base_node import BaseNode
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.layer_filter_params import LayerFilterParams
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.layer_filter_params import LayerFilterParams
 
 
 def filter_fusing_patterns(fusing_patterns: List[List[Any]], node: BaseNode, idx: int = 0) -> List[List[Any]]:
diff --git a/model_compression_toolkit/core/keras/resource_utilization_data_facade.py b/model_compression_toolkit/core/keras/resource_utilization_data_facade.py
index 4ddfe75a8..493007d44 100644
--- a/model_compression_toolkit/core/keras/resource_utilization_data_facade.py
+++ b/model_compression_toolkit/core/keras/resource_utilization_data_facade.py
@@ -24,7 +24,7 @@
 from model_compression_toolkit.verify_packages import FOUND_TF
 
 if FOUND_TF:
-    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
+    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
     from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
     from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
     from tensorflow.keras.models import Model
@@ -33,7 +33,7 @@
 
     from model_compression_toolkit import get_target_platform_capabilities
 
-    KERAS_DEFAULT_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
+    KERAS_DEFAULT_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
 
     def keras_resource_utilization_data(in_model: Model,
                                         representative_data_gen: Callable,
diff --git a/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py b/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py
index ff130265f..a2f7c93f2 100644
--- a/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py
+++ b/model_compression_toolkit/core/pytorch/resource_utilization_data_facade.py
@@ -18,24 +18,23 @@
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.constants import PYTORCH
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
-from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization_data import compute_resource_utilization_data
 from model_compression_toolkit.core.common.quantization.core_config import CoreConfig
 from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import MixedPrecisionQuantizationConfig
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 from model_compression_toolkit.verify_packages import FOUND_TORCH
 
 if FOUND_TORCH:
     from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
     from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
     from torch.nn import Module
-    from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2pytorch import \
+    from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2pytorch import \
         AttachTpcToPytorch
 
     from model_compression_toolkit import get_target_platform_capabilities
 
-    PYTORCH_DEFAULT_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TPC)
+    PYTORCH_DEFAULT_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
 
 
     def pytorch_resource_utilization_data(in_model: Module,
diff --git a/model_compression_toolkit/gptq/keras/quantization_facade.py b/model_compression_toolkit/gptq/keras/quantization_facade.py
index fcfe11053..2da9b0bc6 100644
--- a/model_compression_toolkit/gptq/keras/quantization_facade.py
+++ b/model_compression_toolkit/gptq/keras/quantization_facade.py
@@ -44,7 +44,7 @@
     from model_compression_toolkit.core.keras.keras_model_validation import KerasModelValidation
     from tensorflow.keras.models import Model
     from model_compression_toolkit.gptq.keras.gptq_loss import GPTQMultipleTensorsLoss, sample_layer_attention_loss
-    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
+    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
     from model_compression_toolkit.exporter.model_wrapper import get_exportable_keras_model
     from model_compression_toolkit import get_target_platform_capabilities
     from mct_quantizers.keras.metadata import add_metadata
@@ -59,7 +59,7 @@
     else:
         from tensorflow.python.keras.optimizer_v2.optimizer_v2 import OptimizerV2
 
-    DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
+    DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
 
     def get_keras_gptq_config(n_epochs: int,
                               optimizer: OptimizerV2 = None,
diff --git a/model_compression_toolkit/gptq/pytorch/quantization_facade.py b/model_compression_toolkit/gptq/pytorch/quantization_facade.py
index 8c9049e99..1bc455c34 100644
--- a/model_compression_toolkit/gptq/pytorch/quantization_facade.py
+++ b/model_compression_toolkit/gptq/pytorch/quantization_facade.py
@@ -32,7 +32,6 @@
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.metadata import create_model_metadata
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
-from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.verify_packages import FOUND_TORCH
 
 
@@ -40,7 +39,7 @@
 if FOUND_TORCH:
     from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
     from model_compression_toolkit.gptq.pytorch.gptq_pytorch_implementation import GPTQPytorchImplemantation
-    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
+    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
     from model_compression_toolkit.gptq.pytorch.gptq_loss import multiple_tensors_mse_loss, sample_layer_attention_loss
     from model_compression_toolkit.exporter.model_wrapper.pytorch.builder.fully_quantized_model_builder import get_exportable_pytorch_model
     import torch
@@ -48,10 +47,10 @@
     from torch.optim import Adam, Optimizer
     from model_compression_toolkit import get_target_platform_capabilities
     from mct_quantizers.pytorch.metadata import add_metadata
-    from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2pytorch import \
+    from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2pytorch import \
         AttachTpcToPytorch
 
-    DEFAULT_PYTORCH_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TPC)
+    DEFAULT_PYTORCH_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
 
     def get_pytorch_gptq_config(n_epochs: int,
                                 optimizer: Optimizer = None,
diff --git a/model_compression_toolkit/pruning/keras/pruning_facade.py b/model_compression_toolkit/pruning/keras/pruning_facade.py
index 8fee1abcd..beda9c3f2 100644
--- a/model_compression_toolkit/pruning/keras/pruning_facade.py
+++ b/model_compression_toolkit/pruning/keras/pruning_facade.py
@@ -28,7 +28,7 @@
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.common.quantization.quantization_config import DEFAULTCONFIG
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 
 if FOUND_TF:
     from model_compression_toolkit.core.keras.back2framework.float_model_builder import FloatKerasModelBuilder
@@ -38,7 +38,7 @@
     from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
         AttachTpcToKeras
 
-    DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
+    DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
 
     def keras_pruning_experimental(model: Model,
                                    target_resource_utilization: ResourceUtilization,
diff --git a/model_compression_toolkit/pruning/pytorch/pruning_facade.py b/model_compression_toolkit/pruning/pytorch/pruning_facade.py
index 47bc15df2..4b97cc7bd 100644
--- a/model_compression_toolkit/pruning/pytorch/pruning_facade.py
+++ b/model_compression_toolkit/pruning/pytorch/pruning_facade.py
@@ -25,9 +25,8 @@
 from model_compression_toolkit.core.common.quantization.set_node_quantization_config import set_quantization_configuration_to_graph
 from model_compression_toolkit.core.graph_prep_runner import read_model_to_graph
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.common.quantization.quantization_config import DEFAULTCONFIG
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 
 
 # Check if PyTorch is available in the environment.
@@ -38,11 +37,11 @@
         PruningPytorchImplementation
     from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
     from torch.nn import Module
-    from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2pytorch import \
+    from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2pytorch import \
         AttachTpcToPytorch
 
     # Set the default Target Platform Capabilities (TPC) for PyTorch.
-    DEFAULT_PYOTRCH_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TPC)
+    DEFAULT_PYOTRCH_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
 
     def pytorch_pruning_experimental(model: Module,
                                      target_resource_utilization: ResourceUtilization,
diff --git a/model_compression_toolkit/ptq/keras/quantization_facade.py b/model_compression_toolkit/ptq/keras/quantization_facade.py
index 2712d8f47..3f0e960b1 100644
--- a/model_compression_toolkit/ptq/keras/quantization_facade.py
+++ b/model_compression_toolkit/ptq/keras/quantization_facade.py
@@ -37,7 +37,7 @@
     from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
     from model_compression_toolkit.core.keras.keras_model_validation import KerasModelValidation
     from tensorflow.keras.models import Model
-    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
+    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
     from model_compression_toolkit.exporter.model_wrapper import get_exportable_keras_model
 
     from model_compression_toolkit import get_target_platform_capabilities
@@ -45,7 +45,7 @@
     from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
         AttachTpcToKeras
 
-    DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
+    DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
 
 
     def keras_post_training_quantization(in_model: Model,
diff --git a/model_compression_toolkit/ptq/pytorch/quantization_facade.py b/model_compression_toolkit/ptq/pytorch/quantization_facade.py
index 506bbabea..fdba01ecb 100644
--- a/model_compression_toolkit/ptq/pytorch/quantization_facade.py
+++ b/model_compression_toolkit/ptq/pytorch/quantization_facade.py
@@ -21,7 +21,6 @@
 from model_compression_toolkit.constants import PYTORCH
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
 from model_compression_toolkit.verify_packages import FOUND_TORCH
-from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization
 from model_compression_toolkit.core import CoreConfig
 from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \
@@ -35,15 +34,15 @@
 if FOUND_TORCH:
     from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
     from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
-    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
+    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
     from torch.nn import Module
     from model_compression_toolkit.exporter.model_wrapper.pytorch.builder.fully_quantized_model_builder import get_exportable_pytorch_model
     from model_compression_toolkit import get_target_platform_capabilities
     from mct_quantizers.pytorch.metadata import add_metadata
-    from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2pytorch import \
+    from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2pytorch import \
         AttachTpcToPytorch
 
-    DEFAULT_PYTORCH_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TPC)
+    DEFAULT_PYTORCH_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
 
     def pytorch_post_training_quantization(in_module: Module,
                                            representative_data_gen: Callable,
diff --git a/model_compression_toolkit/qat/keras/quantization_facade.py b/model_compression_toolkit/qat/keras/quantization_facade.py
index 633d0a568..a38600132 100644
--- a/model_compression_toolkit/qat/keras/quantization_facade.py
+++ b/model_compression_toolkit/qat/keras/quantization_facade.py
@@ -38,7 +38,7 @@
     from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
     from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
     from model_compression_toolkit.core.keras.keras_model_validation import KerasModelValidation
-    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
+    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 
     from model_compression_toolkit.core.keras.back2framework.keras_model_builder import KerasModelBuilder
 
@@ -50,7 +50,7 @@
     from model_compression_toolkit.constants import TENSORFLOW
     from model_compression_toolkit.core.common.framework_info import FrameworkInfo
     from model_compression_toolkit.qat.common.qat_config import is_qat_applicable
-    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
+    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
     from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
     from model_compression_toolkit.qat.keras.quantizer.quantization_builder import quantization_builder, \
     get_activation_quantizer_holder
@@ -58,7 +58,7 @@
     from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
         AttachTpcToKeras
 
-    DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
+    DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
 
 
     def qat_wrapper(n: common.BaseNode,
diff --git a/model_compression_toolkit/qat/pytorch/quantization_facade.py b/model_compression_toolkit/qat/pytorch/quantization_facade.py
index 1ea101158..55537eadb 100644
--- a/model_compression_toolkit/qat/pytorch/quantization_facade.py
+++ b/model_compression_toolkit/qat/pytorch/quantization_facade.py
@@ -12,13 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
-import copy
 from typing import Callable
 from functools import partial
 
 from model_compression_toolkit.constants import PYTORCH
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2pytorch import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2pytorch import \
     AttachTpcToPytorch
 from model_compression_toolkit.verify_packages import FOUND_TORCH
 
@@ -26,12 +25,9 @@
 from model_compression_toolkit.core import common
 from model_compression_toolkit.core.common.visualization.tensorboard_writer import init_tensorboard_writer
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.core.common.framework_info import FrameworkInfo
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization
 from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \
     MixedPrecisionQuantizationConfig
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import \
-    FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.runner import core_runner
 from model_compression_toolkit.ptq.runner import ptq_runner
 
@@ -40,7 +36,7 @@
     from torch.nn import Module
     from mct_quantizers import PytorchActivationQuantizationHolder
     from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
-    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
+    from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
     from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
     from model_compression_toolkit.qat.common.qat_config import is_qat_applicable
     from model_compression_toolkit.core.pytorch.back2framework.pytorch_model_builder import PyTorchModelBuilder
@@ -50,7 +46,7 @@
     from model_compression_toolkit.qat.pytorch.quantizer.quantization_builder import get_activation_quantizer_holder
     from model_compression_toolkit.qat.pytorch.quantizer.quantization_builder import quantization_builder
 
-    DEFAULT_PYTORCH_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TPC)
+    DEFAULT_PYTORCH_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
 
 
     def qat_wrapper(n: common.BaseNode,
diff --git a/model_compression_toolkit/target_platform_capabilities/constants.py b/model_compression_toolkit/target_platform_capabilities/constants.py
index 44fc1a5b1..3d266ec8c 100644
--- a/model_compression_toolkit/target_platform_capabilities/constants.py
+++ b/model_compression_toolkit/target_platform_capabilities/constants.py
@@ -21,10 +21,10 @@
 
 
 # Supported TP models names:
-DEFAULT_TPC = 'default'
-IMX500_TPC = 'imx500'
-TFLITE_TPC = 'tflite'
-QNNPACK_TPC = 'qnnpack'
+DEFAULT_TP_MODEL = 'default'
+IMX500_TP_MODEL = 'imx500'
+TFLITE_TP_MODEL = 'tflite'
+QNNPACK_TP_MODEL = 'qnnpack'
 
 # TP Attributes
 KERNEL_ATTR = "kernel_attr"
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py b/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py
deleted file mode 100644
index d06da049d..000000000
--- a/model_compression_toolkit/target_platform_capabilities/target_platform/__init__.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2022 Sony Semiconductor Israel, Inc. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ==============================================================================
-
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attribute_filter import AttributeFilter
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities, OperationsSetToLayers, Smaller, SmallerEq, NotEq, Eq, GreaterEq, Greater, LayerFilterParams, OperationsToLayers, get_current_tpc
-from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, OperatorsSet, \
-    OperatorSetGroup, Signedness, AttributeQuantizationConfig, OpQuantizationConfig, QuantizationConfigOptions, Fusing
-
-from mct_quantizers import QuantizationMethod
-
-
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/__init__.py
similarity index 83%
rename from model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py
rename to model_compression_toolkit/target_platform_capabilities/targetplatform2framework/__init__.py
index 2f1e68bfb..b3ee5dff4 100644
--- a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/__init__.py
+++ b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/__init__.py
@@ -15,9 +15,7 @@
 
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.current_tpc import get_current_tpc
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.framework_quantization_capabilities import FrameworkQuantizationCapabilities
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attribute_filter import \
-    Eq, GreaterEq, NotEq, SmallerEq, Greater, Smaller
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.layer_filter_params import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.layer_filter_params import \
     LayerFilterParams
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.operations_to_layers import \
     OperationsToLayers, OperationsSetToLayers
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attach2fw.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2fw.py
similarity index 100%
rename from model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attach2fw.py
rename to model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2fw.py
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attach2keras.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2keras.py
similarity index 100%
rename from model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attach2keras.py
rename to model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2keras.py
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attach2pytorch.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2pytorch.py
similarity index 100%
rename from model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attach2pytorch.py
rename to model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2pytorch.py
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attribute_filter.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attribute_filter.py
similarity index 100%
rename from model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/attribute_filter.py
rename to model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attribute_filter.py
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/current_tpc.py
similarity index 100%
rename from model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py
rename to model_compression_toolkit/target_platform_capabilities/targetplatform2framework/current_tpc.py
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/framework_quantization_capabilities.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities.py
similarity index 99%
rename from model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/framework_quantization_capabilities.py
rename to model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities.py
index 3b0d8e417..e5af3d2e7 100644
--- a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/framework_quantization_capabilities.py
+++ b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities.py
@@ -24,7 +24,7 @@
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.operations_to_layers import \
     OperationsToLayers, OperationsSetToLayers
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.framework_quantization_capabilities_component import FrameworkQuantizationCapabilitiesComponent
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.layer_filter_params import LayerFilterParams
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.layer_filter_params import LayerFilterParams
 from model_compression_toolkit.target_platform_capabilities.immutable import ImmutableClass
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, OperatorsSetBase, \
     OpQuantizationConfig, QuantizationConfigOptions
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/framework_quantization_capabilities_component.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities_component.py
similarity index 100%
rename from model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/framework_quantization_capabilities_component.py
rename to model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities_component.py
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/layer_filter_params.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/layer_filter_params.py
similarity index 100%
rename from model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/layer_filter_params.py
rename to model_compression_toolkit/target_platform_capabilities/targetplatform2framework/layer_filter_params.py
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/operations_to_layers.py
similarity index 100%
rename from model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/operations_to_layers.py
rename to model_compression_toolkit/target_platform_capabilities/targetplatform2framework/operations_to_layers.py
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py
index f51cd7169..c75a9bebe 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/get_target_platform_capabilities.py
@@ -13,8 +13,8 @@
 # limitations under the License.
 # ==============================================================================
 from model_compression_toolkit.constants import TENSORFLOW, PYTORCH
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC, IMX500_TPC, \
-    TFLITE_TPC, QNNPACK_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL, IMX500_TP_MODEL, \
+    TFLITE_TP_MODEL, QNNPACK_TP_MODEL
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
 
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.v1.tpc import get_tpc as get_tpc_imx500_v1
@@ -42,17 +42,17 @@ def get_target_platform_capabilities(fw_name: str,
 
     assert fw_name in [TENSORFLOW, PYTORCH], f"Unsupported framework {fw_name}."
 
-    if target_platform_name == DEFAULT_TPC:
+    if target_platform_name == DEFAULT_TP_MODEL:
         return get_tpc_imx500_v1()
 
     assert target_platform_version == 'v1' or target_platform_version is None, \
         "The usage of get_target_platform_capabilities API is supported only with the default TPC ('v1')."
 
-    if target_platform_name == IMX500_TPC:
+    if target_platform_name == IMX500_TP_MODEL:
         return get_tpc_imx500_v1()
-    elif target_platform_name == TFLITE_TPC:
+    elif target_platform_name == TFLITE_TP_MODEL:
         return get_tpc_tflite_v1()
-    elif target_platform_name == QNNPACK_TPC:
+    elif target_platform_name == QNNPACK_TP_MODEL:
         return get_tpc_qnnpack_v1()
 
     raise ValueError(f"Unsupported target platform name {target_platform_name}.")
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc.py
index d7e489b54..b4e6f273d 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc.py
@@ -18,7 +18,7 @@
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
-    IMX500_TPC
+    IMX500_TP_MODEL
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
@@ -241,7 +241,7 @@ def generate_tpc(default_config: OpQuantizationConfig,
         default_qco=default_configuration_options,
         tpc_minor_version=1,
         tpc_patch_version=0,
-        tpc_platform_type=IMX500_TPC,
+        tpc_platform_type=IMX500_TP_MODEL,
         operator_set=tuple(operator_set),
         fusing_patterns=tuple(fusing_patterns),
         name=name,
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py
index e75d89970..979febe5d 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py
@@ -17,7 +17,7 @@
 import model_compression_toolkit as mct
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
-from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, QNNPACK_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, QNNPACK_TP_MODEL
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
@@ -178,7 +178,7 @@ def generate_tpc(default_config: OpQuantizationConfig,
         default_qco=default_configuration_options,
         tpc_minor_version=1,
         tpc_patch_version=0,
-        tpc_platform_type=QNNPACK_TPC,
+        tpc_platform_type=QNNPACK_TP_MODEL,
         operator_set=tuple(operator_set),
         fusing_patterns=tuple(fusing_patterns),
         add_metadata=False,
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py
index 7a8a1bc4e..8e00a048d 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py
@@ -17,7 +17,7 @@
 import model_compression_toolkit as mct
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
-from model_compression_toolkit.target_platform_capabilities.constants import BIAS_ATTR, KERNEL_ATTR, TFLITE_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import BIAS_ATTR, KERNEL_ATTR, TFLITE_TP_MODEL
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
@@ -218,7 +218,7 @@ def generate_tpc(default_config: OpQuantizationConfig,
         tpc_patch_version=0,
         operator_set=tuple(operator_set),
         fusing_patterns=tuple(fusing_patterns),
-        tpc_platform_type=TFLITE_TPC,
+        tpc_platform_type=TFLITE_TP_MODEL,
         add_metadata=False,
         name=name)
 
diff --git a/model_compression_toolkit/xquant/keras/keras_report_utils.py b/model_compression_toolkit/xquant/keras/keras_report_utils.py
index a83c7906e..73c249b6f 100644
--- a/model_compression_toolkit/xquant/keras/keras_report_utils.py
+++ b/model_compression_toolkit/xquant/keras/keras_report_utils.py
@@ -26,7 +26,7 @@
 from model_compression_toolkit.xquant.keras.similarity_functions import KerasSimilarityFunctions
 from model_compression_toolkit.xquant.keras.tensorboard_utils import KerasTensorboardUtils
 from mct_quantizers.keras.metadata import get_metadata
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
 
@@ -44,7 +44,7 @@ def __init__(self, report_dir: str):
         fw_impl = KerasImplementation()
 
         # Set the default Target Platform Capabilities (TPC) for Keras.
-        default_tpc = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
+        default_tpc = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
         attach2pytorch = AttachTpcToKeras()
         framework_platform_capabilities = attach2pytorch.attach(default_tpc)
 
diff --git a/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py b/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py
index 3aa737462..0ee7db030 100644
--- a/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py
+++ b/model_compression_toolkit/xquant/pytorch/pytorch_report_utils.py
@@ -15,8 +15,8 @@
 from model_compression_toolkit import get_target_platform_capabilities
 from model_compression_toolkit.constants import PYTORCH
 from model_compression_toolkit.core.pytorch.utils import get_working_device
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2pytorch import \
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2pytorch import \
     AttachTpcToPytorch
 
 from model_compression_toolkit.xquant.common.framework_report_utils import FrameworkReportUtils
@@ -42,7 +42,7 @@ def __init__(self, report_dir: str):
         fw_info = DEFAULT_PYTORCH_INFO
         fw_impl = PytorchImplementation()
         # Set the default Target Platform Capabilities (TPC) for PyTorch.
-        default_tpc = get_target_platform_capabilities(PYTORCH, DEFAULT_TPC)
+        default_tpc = get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
         attach2pytorch = AttachTpcToPytorch()
         framework_quantization_capabilities = attach2pytorch.attach(default_tpc)
 
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v1/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v1/tpc.py
index be841ab5f..89f9735f0 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v1/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v1/tpc.py
@@ -18,7 +18,7 @@
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
-    IMX500_TPC
+    IMX500_TP_MODEL
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
@@ -242,7 +242,7 @@ def generate_tpc(default_config: OpQuantizationConfig,
         default_qco=default_configuration_options,
         tpc_minor_version=1,
         tpc_patch_version=0,
-        tpc_platform_type=IMX500_TPC,
+        tpc_platform_type=IMX500_TP_MODEL,
         operator_set=tuple(operator_set),
         fusing_patterns=tuple(fusing_patterns),
         name=name,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tpc.py
index b2f09c611..40cd8414e 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tpc.py
@@ -18,7 +18,7 @@
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
-    WEIGHTS_QUANTIZATION_METHOD, IMX500_TPC
+    WEIGHTS_QUANTIZATION_METHOD, IMX500_TP_MODEL
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
@@ -274,7 +274,7 @@ def generate_tpc(default_config: OpQuantizationConfig,
         default_qco=default_configuration_options,
         tpc_minor_version=2,
         tpc_patch_version=0,
-        tpc_platform_type=IMX500_TPC,
+        tpc_platform_type=IMX500_TP_MODEL,
         operator_set=tuple(operator_set),
         fusing_patterns=tuple(fusing_patterns),
         add_metadata=True,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tpc.py
index c2a708cc7..e16be42b8 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tpc.py
@@ -18,7 +18,7 @@
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
-    IMX500_TPC
+    IMX500_TP_MODEL
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
@@ -270,7 +270,7 @@ def generate_tpc(default_config: OpQuantizationConfig,
         default_qco=default_configuration_options,
         tpc_minor_version=2,
         tpc_patch_version=0,
-        tpc_platform_type=IMX500_TPC,
+        tpc_platform_type=IMX500_TP_MODEL,
         operator_set=tuple(operator_set),
         fusing_patterns=tuple(fusing_patterns),
         add_metadata=True,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v2/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v2/tpc.py
index 89aa63f82..5fd88882a 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v2/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v2/tpc.py
@@ -18,7 +18,7 @@
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
-    IMX500_TPC
+    IMX500_TP_MODEL
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
@@ -253,7 +253,7 @@ def generate_tpc(default_config: OpQuantizationConfig,
         default_qco=default_configuration_options,
         tpc_minor_version=2,
         tpc_patch_version=0,
-        tpc_platform_type=IMX500_TPC,
+        tpc_platform_type=IMX500_TP_MODEL,
         operator_set=tuple(operator_set),
         fusing_patterns=tuple(fusing_patterns),
         add_metadata=True,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tpc.py
index 6eb1fe242..583781f7f 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tpc.py
@@ -18,7 +18,7 @@
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
-    WEIGHTS_QUANTIZATION_METHOD, IMX500_TPC
+    WEIGHTS_QUANTIZATION_METHOD, IMX500_TP_MODEL
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
@@ -276,7 +276,7 @@ def generate_tpc(default_config: OpQuantizationConfig,
         default_qco=default_configuration_options,
         tpc_minor_version=2,
         tpc_patch_version=0,
-        tpc_platform_type=IMX500_TPC,
+        tpc_platform_type=IMX500_TP_MODEL,
         operator_set=tuple(operator_set),
         fusing_patterns=tuple(fusing_patterns),
         add_metadata=True,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v3/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v3/tpc.py
index 95e1c6786..75c15d2b7 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v3/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v3/tpc.py
@@ -18,7 +18,7 @@
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
-    IMX500_TPC
+    IMX500_TP_MODEL
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
@@ -283,7 +283,7 @@ def generate_tpc(default_config: OpQuantizationConfig,
         default_qco=default_configuration_options,
         tpc_minor_version=3,
         tpc_patch_version=0,
-        tpc_platform_type=IMX500_TPC,
+        tpc_platform_type=IMX500_TP_MODEL,
         operator_set=tuple(operator_set),
         fusing_patterns=tuple(fusing_patterns),
         add_metadata=True,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tpc.py
index 530ea55d3..631c82513 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tpc.py
@@ -18,7 +18,7 @@
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
-    WEIGHTS_QUANTIZATION_METHOD, IMX500_TPC
+    WEIGHTS_QUANTIZATION_METHOD, IMX500_TP_MODEL
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
@@ -308,7 +308,7 @@ def generate_tpc(default_config: OpQuantizationConfig,
         default_qco=default_configuration_options,
         tpc_minor_version=3,
         tpc_patch_version=0,
-        tpc_platform_type=IMX500_TPC,
+        tpc_platform_type=IMX500_TP_MODEL,
         operator_set=tuple(operator_set),
         fusing_patterns=tuple(fusing_patterns),
         add_metadata=True,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v4/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v4/tpc.py
index 5518ac558..9c410c503 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v4/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v4/tpc.py
@@ -18,7 +18,7 @@
 import model_compression_toolkit.target_platform_capabilities.schema.v1 as schema
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
-    IMX500_TPC
+    IMX500_TP_MODEL
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
@@ -321,7 +321,7 @@ def generate_tpc(default_config: OpQuantizationConfig,
         default_qco=default_configuration_options,
         tpc_minor_version=4,
         tpc_patch_version=0,
-        tpc_platform_type=IMX500_TPC,
+        tpc_platform_type=IMX500_TP_MODEL,
         operator_set=tuple(operator_set),
         fusing_patterns=tuple(fusing_patterns),
         add_metadata=True,
diff --git a/tests/external_tests/keras_tests/models_tests/test_networks_runner_float.py b/tests/external_tests/keras_tests/models_tests/test_networks_runner_float.py
index 0e0ae20ae..e5ca3e3da 100644
--- a/tests/external_tests/keras_tests/models_tests/test_networks_runner_float.py
+++ b/tests/external_tests/keras_tests/models_tests/test_networks_runner_float.py
@@ -26,7 +26,7 @@
 from model_compression_toolkit.core.common.quantization.set_node_quantization_config import \
     set_quantization_configuration_to_graph
 from model_compression_toolkit.core.common.substitutions.apply_substitutions import substitute
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
 from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
 from model_compression_toolkit.core.keras.reader.reader import model_reader
@@ -56,7 +56,7 @@ def run_network(self, inputs_list):
         fw_impl = KerasImplementation()
         fw_info = DEFAULT_KERAS_INFO
 
-        keras_default_tpc = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
+        keras_default_tpc = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
 
         graph = model_reader(self.model_float)  # model reading
         graph.set_fw_info(DEFAULT_KERAS_INFO)
diff --git a/tests/keras_tests/exporter_tests/keras_fake_quant/keras_fake_quant_exporter_base_test.py b/tests/keras_tests/exporter_tests/keras_fake_quant/keras_fake_quant_exporter_base_test.py
index 864714cc9..629af64a3 100644
--- a/tests/keras_tests/exporter_tests/keras_fake_quant/keras_fake_quant_exporter_base_test.py
+++ b/tests/keras_tests/exporter_tests/keras_fake_quant/keras_fake_quant_exporter_base_test.py
@@ -23,7 +23,7 @@
 import model_compression_toolkit as mct
 from model_compression_toolkit import get_target_platform_capabilities
 from model_compression_toolkit.constants import TENSORFLOW
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 from model_compression_toolkit.exporter.model_exporter.keras.base_keras_exporter import DEFAULT_KERAS_EXPORT_EXTENTION
 
 def get_minmax_from_qparams(qparams):
@@ -82,7 +82,7 @@ def get_input_shape(self):
         return [(16, 16, 3)]
 
     def get_tpc(self):
-        return get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
+        return get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
 
     def get_quantization_config(self):
         return mct.core.QuantizationConfig()
diff --git a/tests/keras_tests/exporter_tests/tflite_fake_quant/tflite_fake_quant_exporter_base_test.py b/tests/keras_tests/exporter_tests/tflite_fake_quant/tflite_fake_quant_exporter_base_test.py
index 73df77df3..887a14452 100644
--- a/tests/keras_tests/exporter_tests/tflite_fake_quant/tflite_fake_quant_exporter_base_test.py
+++ b/tests/keras_tests/exporter_tests/tflite_fake_quant/tflite_fake_quant_exporter_base_test.py
@@ -21,7 +21,7 @@
 import model_compression_toolkit as mct
 from model_compression_toolkit import get_target_platform_capabilities
 from model_compression_toolkit.constants import TENSORFLOW
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 import tests.keras_tests.exporter_tests.constants as constants
 from model_compression_toolkit.exporter.model_exporter.keras.base_keras_exporter import DEFAULT_KERAS_EXPORT_EXTENTION
 
@@ -68,7 +68,7 @@ def get_input_shape(self):
         return [(16, 16, 3)]
 
     def get_tpc(self):
-        return get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
+        return get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
 
     def __get_repr_dataset(self):
         yield [np.random.randn(*((1,) + shape)) for shape in self.get_input_shape()]
diff --git a/tests/keras_tests/feature_networks_tests/base_keras_feature_test.py b/tests/keras_tests/feature_networks_tests/base_keras_feature_test.py
index a3c611a35..9e9b8aa6a 100644
--- a/tests/keras_tests/feature_networks_tests/base_keras_feature_test.py
+++ b/tests/keras_tests/feature_networks_tests/base_keras_feature_test.py
@@ -14,7 +14,7 @@
 # ==============================================================================
 from model_compression_toolkit.constants import TENSORFLOW
 from model_compression_toolkit.core.common.framework_implementation import FrameworkImplementation
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
 from model_compression_toolkit.gptq import keras_gradient_post_training_quantization
 from model_compression_toolkit.core import FrameworkInfo
@@ -39,7 +39,7 @@ def __init__(self,
                          input_shape=input_shape)
 
     def get_tpc(self):
-        return get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
+        return get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
 
     def get_ptq_facade(self):
         return keras_post_training_quantization
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/activation_16bit_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/activation_16bit_test.py
index 05bcd08dc..be154a1fa 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/activation_16bit_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/activation_16bit_test.py
@@ -18,7 +18,7 @@
 import model_compression_toolkit as mct
 from model_compression_toolkit.constants import TENSORFLOW
 from model_compression_toolkit.core import MixedPrecisionQuantizationConfig
-from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TP_MODEL
 from mct_quantizers.keras.activation_quantization_holder import KerasActivationQuantizationHolder
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import OperatorSetNames, \
     QuantizationConfigOptions
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/compute_max_cut_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/compute_max_cut_test.py
index 579d9c2de..64d7c81e9 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/compute_max_cut_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/compute_max_cut_test.py
@@ -18,7 +18,7 @@
 
 from mct_quantizers.keras.metadata import get_metadata
 from model_compression_toolkit.constants import TENSORFLOW
-from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TP_MODEL
 from tests.common_tests.helpers.tpcs_for_tests.v2.tpc import get_tpc
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/const_representation_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/const_representation_test.py
index ab497f630..7d662f3ed 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/const_representation_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/const_representation_test.py
@@ -18,7 +18,7 @@
 import model_compression_toolkit as mct
 from model_compression_toolkit import get_target_platform_capabilities
 from model_compression_toolkit.constants import TENSORFLOW
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
@@ -90,7 +90,7 @@ def generate_inputs(self):
         return [1 + np.random.random(in_shape) for in_shape in self.get_input_shapes()]
 
     def get_tpc(self):
-        return get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
+        return get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
 
     def create_networks(self):
         inputs = layers.Input(shape=self.get_input_shapes()[0][1:])
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/conv_func_substitutions_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/conv_func_substitutions_test.py
index 7c8ad6967..075c42bcb 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/conv_func_substitutions_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/conv_func_substitutions_test.py
@@ -23,7 +23,7 @@
 
 import model_compression_toolkit as mct
 from model_compression_toolkit.constants import TENSORFLOW
-from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TP_MODEL
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/metadata_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/metadata_test.py
index bf3e317a2..5089c1d97 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/metadata_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/metadata_test.py
@@ -22,7 +22,7 @@
 from mct_quantizers.keras.metadata import add_metadata, get_metadata
 
 from model_compression_toolkit.constants import TENSORFLOW
-from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TP_MODEL
 
 keras = tf.keras
 layers = keras.layers
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/second_moment_correction_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/second_moment_correction_test.py
index d5b2debfc..44fb8de79 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/second_moment_correction_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/second_moment_correction_test.py
@@ -35,7 +35,7 @@
 from model_compression_toolkit.core.keras.statistics_correction.apply_second_moment_correction import \
     keras_apply_second_moment_correction
 from model_compression_toolkit.core.runner import core_runner
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
 from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
@@ -46,7 +46,7 @@
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 from tests.keras_tests.utils import get_layers_from_model_by_type
 
-DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
+DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
 from tensorflow.keras.models import Model
 
 keras = tf.keras
diff --git a/tests/keras_tests/feature_networks_tests/test_features_runner.py b/tests/keras_tests/feature_networks_tests/test_features_runner.py
index 2cda42f67..e34525ac1 100644
--- a/tests/keras_tests/feature_networks_tests/test_features_runner.py
+++ b/tests/keras_tests/feature_networks_tests/test_features_runner.py
@@ -864,9 +864,9 @@ def test_metadata(self):
         MetadataTest(self).run_test()
 
     def test_keras_tpcs(self):
-        TpcTest(f'{C.IMX500_TPC}.v1', self).run_test()
-        TpcTest(f'{C.TFLITE_TPC}.v1', self).run_test()
-        TpcTest(f'{C.QNNPACK_TPC}.v1', self).run_test()
+        TpcTest(f'{C.IMX500_TP_MODEL}.v1', self).run_test()
+        TpcTest(f'{C.TFLITE_TP_MODEL}.v1', self).run_test()
+        TpcTest(f'{C.QNNPACK_TP_MODEL}.v1', self).run_test()
 
     def test_sigmoid_mul_substitution(self):
         SigMulSubstitutionTest(self).run_test()
diff --git a/tests/keras_tests/function_tests/test_export_keras_fully_quantized_model.py b/tests/keras_tests/function_tests/test_export_keras_fully_quantized_model.py
index 18b00d0b8..fc39f013e 100644
--- a/tests/keras_tests/function_tests/test_export_keras_fully_quantized_model.py
+++ b/tests/keras_tests/function_tests/test_export_keras_fully_quantized_model.py
@@ -27,12 +27,12 @@
 from model_compression_toolkit.trainable_infrastructure.keras.load_model import \
     keras_load_quantized_model
 
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 from model_compression_toolkit.constants import TENSORFLOW
 from model_compression_toolkit import get_target_platform_capabilities
 from tests.keras_tests.utils import get_layers_from_model_by_type
 
-DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
+DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
 
 _, SAVED_EXPORTABLE_MODEL_PATH_TF = tempfile.mkstemp('.h5')
 _, SAVED_MODEL_PATH_TF = tempfile.mkstemp('.h5')
diff --git a/tests/keras_tests/non_parallel_tests/test_keras_tpc.py b/tests/keras_tests/non_parallel_tests/test_keras_tpc.py
index 63cf0950b..973a6a6b5 100644
--- a/tests/keras_tests/non_parallel_tests/test_keras_tpc.py
+++ b/tests/keras_tests/non_parallel_tests/test_keras_tpc.py
@@ -42,8 +42,8 @@
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attribute_filter import \
     Greater, \
     Smaller, GreaterEq, Eq, SmallerEq, Contains
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC, IMX500_TPC, \
-    QNNPACK_TPC, TFLITE_TPC, KERNEL_ATTR, BIAS_ATTR, KERAS_KERNEL, BIAS, WEIGHTS_N_BITS
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL, IMX500_TP_MODEL, \
+    QNNPACK_TP_MODEL, TFLITE_TP_MODEL, KERNEL_ATTR, BIAS_ATTR, KERAS_KERNEL, BIAS, WEIGHTS_N_BITS
 from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
 
 tp = mct.target_platform
@@ -297,7 +297,7 @@ def test_get_default_op_qc(self):
 
 class TestGetKerasTPC(unittest.TestCase):
     def test_get_keras_tpc(self):
-        tpc = mct.get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)
+        tpc = mct.get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
         input_shape = (1, 8, 8, 3)
         input_tensor = Input(shape=input_shape[1:])
         conv = Conv2D(3, 3)(input_tensor)
@@ -322,16 +322,16 @@ def rep_data():
                                                                       target_platform_capabilities=tpc)
 
     def test_get_keras_supported_version(self):
-        tpc = mct.get_target_platform_capabilities(TENSORFLOW, DEFAULT_TPC)  # Latest
+        tpc = mct.get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)  # Latest
         self.assertTrue(tpc.tpc_minor_version == 1)
 
-        tpc = mct.get_target_platform_capabilities(TENSORFLOW, IMX500_TPC, "v1")
+        tpc = mct.get_target_platform_capabilities(TENSORFLOW, IMX500_TP_MODEL, "v1")
         self.assertTrue(tpc.tpc_minor_version == 1)
 
-        tpc = mct.get_target_platform_capabilities(TENSORFLOW, TFLITE_TPC, "v1")
+        tpc = mct.get_target_platform_capabilities(TENSORFLOW, TFLITE_TP_MODEL, "v1")
         self.assertTrue(tpc.tpc_minor_version == 1)
 
-        tpc = mct.get_target_platform_capabilities(TENSORFLOW, QNNPACK_TPC, "v1")
+        tpc = mct.get_target_platform_capabilities(TENSORFLOW, QNNPACK_TP_MODEL, "v1")
         self.assertTrue(tpc.tpc_minor_version == 1)
 
     def test_get_keras_not_supported_platform(self):
@@ -341,10 +341,10 @@ def test_get_keras_not_supported_platform(self):
 
     def test_get_keras_not_supported_fw(self):
         with self.assertRaises(Exception) as e:
-            mct.get_target_platform_capabilities("ONNX", DEFAULT_TPC)
+            mct.get_target_platform_capabilities("ONNX", DEFAULT_TP_MODEL)
         self.assertTrue(e.exception)
 
     def test_get_keras_not_supported_version(self):
         with self.assertRaises(Exception) as e:
-            mct.get_target_platform_capabilities(TENSORFLOW, IMX500_TPC, "v0")
+            mct.get_target_platform_capabilities(TENSORFLOW, IMX500_TP_MODEL, "v0")
         self.assertTrue(e.exception)
diff --git a/tests/keras_tests/non_parallel_tests/test_tensorboard_writer.py b/tests/keras_tests/non_parallel_tests/test_tensorboard_writer.py
index de1e8ec46..abaeda1aa 100644
--- a/tests/keras_tests/non_parallel_tests/test_tensorboard_writer.py
+++ b/tests/keras_tests/non_parallel_tests/test_tensorboard_writer.py
@@ -28,7 +28,7 @@
 from model_compression_toolkit.core import QuantizationConfig
 from model_compression_toolkit.core.common.visualization.final_config_visualizer import \
     ActivationFinalBitwidthConfigVisualizer
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
 from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
 from model_compression_toolkit.logger import Logger
diff --git a/tests/keras_tests/xquant_tests/test_xquant_end2end.py b/tests/keras_tests/xquant_tests/test_xquant_end2end.py
index f03b60f33..1b7bcb14c 100644
--- a/tests/keras_tests/xquant_tests/test_xquant_end2end.py
+++ b/tests/keras_tests/xquant_tests/test_xquant_end2end.py
@@ -28,7 +28,7 @@
 import model_compression_toolkit as mct
 from mct_quantizers import KerasQuantizationWrapper
 from model_compression_toolkit.constants import TENSORFLOW
-from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TP_MODEL
 from model_compression_toolkit.xquant.common.similarity_functions import DEFAULT_SIMILARITY_METRICS_NAMES
 from model_compression_toolkit.xquant.common.xquant_config import XQuantConfig
 
diff --git a/tests/pytorch_tests/exporter_tests/base_pytorch_export_test.py b/tests/pytorch_tests/exporter_tests/base_pytorch_export_test.py
index b98101512..2cd943dcb 100644
--- a/tests/pytorch_tests/exporter_tests/base_pytorch_export_test.py
+++ b/tests/pytorch_tests/exporter_tests/base_pytorch_export_test.py
@@ -23,7 +23,7 @@
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor
 from model_compression_toolkit.exporter.model_exporter.pytorch.pytorch_export_facade import DEFAULT_ONNX_OPSET_VERSION
 
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import \
     generate_pytorch_tpc
 from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
@@ -41,7 +41,7 @@ def get_dataset(self):
         yield [to_torch_tensor(np.random.rand(*shape)).to(get_working_device()) for shape in self.get_input_shapes()]
 
     def get_tpc(self):
-        return mct.get_target_platform_capabilities(PYTORCH, DEFAULT_TPC)
+        return mct.get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
 
     def get_serialization_format(self):
         raise NotImplemented
diff --git a/tests/pytorch_tests/function_tests/layer_fusing_test.py b/tests/pytorch_tests/function_tests/layer_fusing_test.py
index 1b859ec12..8065ce500 100644
--- a/tests/pytorch_tests/function_tests/layer_fusing_test.py
+++ b/tests/pytorch_tests/function_tests/layer_fusing_test.py
@@ -23,7 +23,7 @@
 from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
 from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2pytorch import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2pytorch import \
     AttachTpcToPytorch
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import \
     get_op_quantization_configs
diff --git a/tests/pytorch_tests/function_tests/set_layer_to_bitwidth_test.py b/tests/pytorch_tests/function_tests/set_layer_to_bitwidth_test.py
index 7b6ca5d26..a86f96df5 100644
--- a/tests/pytorch_tests/function_tests/set_layer_to_bitwidth_test.py
+++ b/tests/pytorch_tests/function_tests/set_layer_to_bitwidth_test.py
@@ -25,7 +25,7 @@
 from model_compression_toolkit.core.pytorch.mixed_precision.configurable_weights_quantizer import \
     ConfigurableWeightsQuantizer
 from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2pytorch import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2pytorch import \
     AttachTpcToPytorch
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_op_quantization_configs
 from tests.common_tests.helpers.generate_test_tpc import generate_mixed_precision_test_tpc
diff --git a/tests/pytorch_tests/function_tests/test_activation_quantization_holder_gptq.py b/tests/pytorch_tests/function_tests/test_activation_quantization_holder_gptq.py
index fae9b7f91..c94539dbc 100644
--- a/tests/pytorch_tests/function_tests/test_activation_quantization_holder_gptq.py
+++ b/tests/pytorch_tests/function_tests/test_activation_quantization_holder_gptq.py
@@ -14,7 +14,7 @@
 from model_compression_toolkit.gptq.pytorch.gptq_training import PytorchGPTQTrainer
 from model_compression_toolkit.gptq.common.gradual_activation_quantization import \
     GradualActivationQuantizerWrapper
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2pytorch import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2pytorch import \
     AttachTpcToPytorch
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 from model_compression_toolkit.trainable_infrastructure import TrainingMethod
diff --git a/tests/pytorch_tests/function_tests/test_export_pytorch_fully_quantized_model.py b/tests/pytorch_tests/function_tests/test_export_pytorch_fully_quantized_model.py
index 4bc46d1fe..d5f98dcd4 100644
--- a/tests/pytorch_tests/function_tests/test_export_pytorch_fully_quantized_model.py
+++ b/tests/pytorch_tests/function_tests/test_export_pytorch_fully_quantized_model.py
@@ -25,14 +25,14 @@
 
 import model_compression_toolkit as mct
 from model_compression_toolkit.constants import PYTORCH
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor
 from model_compression_toolkit.exporter import pytorch_export_model
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit import get_target_platform_capabilities
 
-DEFAULT_PYTORCH_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TPC)
+DEFAULT_PYTORCH_TPC = get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
 
 _, SAVED_MODEL_PATH_PTH = tempfile.mkstemp('.pth')
 _, SAVED_MODEL_PATH_ONNX = tempfile.mkstemp('.onnx')
diff --git a/tests/pytorch_tests/function_tests/test_hessian_info_calculator.py b/tests/pytorch_tests/function_tests/test_hessian_info_calculator.py
index f7b383422..c2e2c6ced 100644
--- a/tests/pytorch_tests/function_tests/test_hessian_info_calculator.py
+++ b/tests/pytorch_tests/function_tests/test_hessian_info_calculator.py
@@ -18,12 +18,11 @@
 
 from model_compression_toolkit.core.pytorch.constants import KERNEL
 from model_compression_toolkit.core.pytorch.data_util import data_gen_to_dataloader
-from model_compression_toolkit.core.pytorch.utils import to_torch_tensor
 import numpy as np
 
 from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
 from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2pytorch import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2pytorch import \
     AttachTpcToPytorch
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 from tests.common_tests.helpers.prep_graph_for_func_test import prepare_graph_with_configs
diff --git a/tests/pytorch_tests/function_tests/test_hessian_service.py b/tests/pytorch_tests/function_tests/test_hessian_service.py
index 244b7d88a..ab0e5bcac 100644
--- a/tests/pytorch_tests/function_tests/test_hessian_service.py
+++ b/tests/pytorch_tests/function_tests/test_hessian_service.py
@@ -13,8 +13,6 @@
 # limitations under the License.
 # ==============================================================================
 
-import unittest
-
 from torch import nn
 import numpy as np
 
@@ -23,7 +21,7 @@
 from model_compression_toolkit.core.pytorch.data_util import data_gen_to_dataloader
 from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
 from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2pytorch import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2pytorch import \
     AttachTpcToPytorch
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 from tests.common_tests.helpers.prep_graph_for_func_test import prepare_graph_with_configs
diff --git a/tests/pytorch_tests/function_tests/test_pytorch_tpc.py b/tests/pytorch_tests/function_tests/test_pytorch_tpc.py
index 8875d47c8..be89034b3 100644
--- a/tests/pytorch_tests/function_tests/test_pytorch_tpc.py
+++ b/tests/pytorch_tests/function_tests/test_pytorch_tpc.py
@@ -33,8 +33,8 @@
     LayerFilterParams
 from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attribute_filter import \
     Greater, Smaller, Eq
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC, IMX500_TPC, \
-    TFLITE_TPC, QNNPACK_TPC, KERNEL_ATTR, WEIGHTS_N_BITS, PYTORCH_KERNEL, BIAS_ATTR, BIAS
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL, IMX500_TP_MODEL, \
+    TFLITE_TP_MODEL, QNNPACK_TP_MODEL, KERNEL_ATTR, WEIGHTS_N_BITS, PYTORCH_KERNEL, BIAS_ATTR, BIAS
 from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
 from tests.common_tests.helpers.generate_test_tpc import generate_test_op_qc, generate_test_attr_configs
 from tests.pytorch_tests.layer_tests.base_pytorch_layer_test import LayerTestModel
@@ -276,7 +276,7 @@ def test_pytorch_fusing_patterns(self):
 class TestGetPytorchTPC(unittest.TestCase):
 
     def test_get_pytorch_models(self):
-        tpc = mct.get_target_platform_capabilities(PYTORCH, DEFAULT_TPC)
+        tpc = mct.get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
         model = mobilenet_v2(pretrained=True)
 
         def rep_data():
@@ -298,16 +298,16 @@ def rep_data():
                                                                         core_config=core_config)
 
     def test_get_pytorch_supported_version(self):
-        tpc = mct.get_target_platform_capabilities(PYTORCH, DEFAULT_TPC)  # Latest
+        tpc = mct.get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)  # Latest
         self.assertTrue(tpc.tpc_minor_version == 1)
 
-        tpc = mct.get_target_platform_capabilities(PYTORCH, IMX500_TPC, "v1")
+        tpc = mct.get_target_platform_capabilities(PYTORCH, IMX500_TP_MODEL, "v1")
         self.assertTrue(tpc.tpc_minor_version == 1)
 
-        tpc = mct.get_target_platform_capabilities(PYTORCH, TFLITE_TPC, "v1")
+        tpc = mct.get_target_platform_capabilities(PYTORCH, TFLITE_TP_MODEL, "v1")
         self.assertTrue(tpc.tpc_minor_version == 1)
 
-        tpc = mct.get_target_platform_capabilities(PYTORCH, QNNPACK_TPC, "v1")
+        tpc = mct.get_target_platform_capabilities(PYTORCH, QNNPACK_TP_MODEL, "v1")
         self.assertTrue(tpc.tpc_minor_version == 1)
 
     def test_get_pytorch_not_supported_platform(self):
@@ -317,12 +317,12 @@ def test_get_pytorch_not_supported_platform(self):
 
     def test_get_pytorch_not_supported_fw(self):
         with self.assertRaises(Exception) as e:
-            mct.get_target_platform_capabilities("ONNX", DEFAULT_TPC)
+            mct.get_target_platform_capabilities("ONNX", DEFAULT_TP_MODEL)
         self.assertTrue(e.exception)
 
     def test_get_pytorch_not_supported_version(self):
         with self.assertRaises(Exception) as e:
-            mct.get_target_platform_capabilities(PYTORCH, IMX500_TPC, "v0")
+            mct.get_target_platform_capabilities(PYTORCH, IMX500_TP_MODEL, "v0")
         self.assertTrue(e.exception)
 
 
diff --git a/tests/pytorch_tests/function_tests/test_quant_config_filtering.py b/tests/pytorch_tests/function_tests/test_quant_config_filtering.py
index 12850512d..007095842 100644
--- a/tests/pytorch_tests/function_tests/test_quant_config_filtering.py
+++ b/tests/pytorch_tests/function_tests/test_quant_config_filtering.py
@@ -22,7 +22,7 @@
     QuantizationConfigOptions
 from model_compression_toolkit.target_platform_capabilities.schema.schema_functions import \
     get_config_options_by_operators_set
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2pytorch import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2pytorch import \
     AttachTpcToPytorch
 from tests.common_tests.helpers.generate_test_tpc import generate_custom_test_tpc
 from tests.common_tests.helpers.tpcs_for_tests.v3.tpc import get_tpc
diff --git a/tests/pytorch_tests/function_tests/test_sensitivity_eval_non_supported_output.py b/tests/pytorch_tests/function_tests/test_sensitivity_eval_non_supported_output.py
index a9b4724e1..2a57e8e95 100644
--- a/tests/pytorch_tests/function_tests/test_sensitivity_eval_non_supported_output.py
+++ b/tests/pytorch_tests/function_tests/test_sensitivity_eval_non_supported_output.py
@@ -20,7 +20,7 @@
 from model_compression_toolkit.core.common.hessian import HessianInfoService
 from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
 from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2pytorch import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2pytorch import \
     AttachTpcToPytorch
 from tests.common_tests.helpers.prep_graph_for_func_test import prepare_graph_with_quantization_parameters
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
diff --git a/tests/pytorch_tests/model_tests/base_pytorch_feature_test.py b/tests/pytorch_tests/model_tests/base_pytorch_feature_test.py
index 266bacd27..0e2f0cfe7 100644
--- a/tests/pytorch_tests/model_tests/base_pytorch_feature_test.py
+++ b/tests/pytorch_tests/model_tests/base_pytorch_feature_test.py
@@ -14,7 +14,7 @@
 # ==============================================================================
 from model_compression_toolkit.constants import PYTORCH
 from model_compression_toolkit.core.common.framework_implementation import FrameworkImplementation
-from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 from model_compression_toolkit.core import FrameworkInfo
 from model_compression_toolkit.ptq import pytorch_post_training_quantization
 from model_compression_toolkit import get_target_platform_capabilities
@@ -38,7 +38,7 @@ def __init__(self,
                          input_shape=input_shape)
 
     def get_tpc(self):
-        return get_target_platform_capabilities(PYTORCH, DEFAULT_TPC)
+        return get_target_platform_capabilities(PYTORCH, DEFAULT_TP_MODEL)
 
     def get_ptq_facade(self):
         return pytorch_post_training_quantization
diff --git a/tests/pytorch_tests/model_tests/feature_models/activation_16bit_test.py b/tests/pytorch_tests/model_tests/feature_models/activation_16bit_test.py
index ec035b618..cda9c3f73 100644
--- a/tests/pytorch_tests/model_tests/feature_models/activation_16bit_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/activation_16bit_test.py
@@ -18,7 +18,7 @@
 import model_compression_toolkit as mct
 from model_compression_toolkit.constants import PYTORCH
 from model_compression_toolkit.core import MixedPrecisionQuantizationConfig
-from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TP_MODEL
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import OperatorSetNames, \
     QuantizationConfigOptions
 from model_compression_toolkit.target_platform_capabilities.schema.schema_functions import \
diff --git a/tests/pytorch_tests/model_tests/feature_models/compute_max_cut_test.py b/tests/pytorch_tests/model_tests/feature_models/compute_max_cut_test.py
index 1e43ad43e..406957445 100644
--- a/tests/pytorch_tests/model_tests/feature_models/compute_max_cut_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/compute_max_cut_test.py
@@ -17,7 +17,7 @@
 import model_compression_toolkit as mct
 from tests.common_tests.helpers.tpcs_for_tests.v2.tpc import get_tpc
 from tests.pytorch_tests.model_tests.base_pytorch_feature_test import BasePytorchFeatureNetworkTest
-from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TP_MODEL
 from model_compression_toolkit.constants import PYTORCH
 from mct_quantizers.pytorch.metadata import get_metadata
 
diff --git a/tests/pytorch_tests/model_tests/feature_models/metadata_test.py b/tests/pytorch_tests/model_tests/feature_models/metadata_test.py
index 5f9528087..f2accc9a4 100644
--- a/tests/pytorch_tests/model_tests/feature_models/metadata_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/metadata_test.py
@@ -21,7 +21,7 @@
 from tests.common_tests.helpers.tpcs_for_tests.v2.tpc import get_tpc
 from tests.pytorch_tests.model_tests.base_pytorch_feature_test import BasePytorchFeatureNetworkTest
 from tests.common_tests.helpers.tensors_compare import cosine_similarity
-from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TP_MODEL
 from model_compression_toolkit.constants import PYTORCH
 from mct_quantizers import PytorchQuantizationWrapper
 from mct_quantizers.pytorch.metadata import add_metadata, get_metadata, add_onnx_metadata, get_onnx_metadata
diff --git a/tests/pytorch_tests/model_tests/feature_models/second_moment_correction_test.py b/tests/pytorch_tests/model_tests/feature_models/second_moment_correction_test.py
index 1ac400c7c..4ed30dbe6 100644
--- a/tests/pytorch_tests/model_tests/feature_models/second_moment_correction_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/second_moment_correction_test.py
@@ -14,7 +14,7 @@
 # ==============================================================================
 import copy
 import random
-from typing import Callable, List, Tuple
+from typing import Callable, Tuple
 
 import numpy as np
 import torch
@@ -34,7 +34,7 @@
     pytorch_apply_second_moment_correction
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor, set_model
 from model_compression_toolkit.core.runner import core_runner
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2pytorch import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2pytorch import \
     AttachTpcToPytorch
 from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
diff --git a/tests/pytorch_tests/model_tests/feature_models/tpc_test.py b/tests/pytorch_tests/model_tests/feature_models/tpc_test.py
index 3510fd80f..aa65d3e66 100644
--- a/tests/pytorch_tests/model_tests/feature_models/tpc_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/tpc_test.py
@@ -20,7 +20,7 @@
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor, torch_tensor_to_numpy, set_model
 from tests.pytorch_tests.model_tests.base_pytorch_feature_test import BasePytorchFeatureNetworkTest
 from tests.common_tests.helpers.tensors_compare import cosine_similarity
-from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TP_MODEL
 from model_compression_toolkit.constants import PYTORCH
 from mct_quantizers import PytorchQuantizationWrapper
 from mct_quantizers.pytorch.metadata import add_metadata, get_metadata, add_onnx_metadata, get_onnx_metadata
diff --git a/tests/pytorch_tests/model_tests/test_feature_models_runner.py b/tests/pytorch_tests/model_tests/test_feature_models_runner.py
index e7b5ee2e8..685403b42 100644
--- a/tests/pytorch_tests/model_tests/test_feature_models_runner.py
+++ b/tests/pytorch_tests/model_tests/test_feature_models_runner.py
@@ -783,9 +783,9 @@ def test_metadata(self):
         MetadataTest(self).run_test()
 
     def test_torch_tpcs(self):
-        TpcTest(f'{C.IMX500_TPC}.v1', self).run_test()
-        TpcTest(f'{C.TFLITE_TPC}.v1', self).run_test()
-        TpcTest(f'{C.QNNPACK_TPC}.v1', self).run_test()
+        TpcTest(f'{C.IMX500_TP_MODEL}.v1', self).run_test()
+        TpcTest(f'{C.TFLITE_TP_MODEL}.v1', self).run_test()
+        TpcTest(f'{C.QNNPACK_TP_MODEL}.v1', self).run_test()
 
     def test_16bit_activations(self):
         Activation16BitTest(self).run_test()
diff --git a/tests/pytorch_tests/xquant_tests/test_xquant_end2end.py b/tests/pytorch_tests/xquant_tests/test_xquant_end2end.py
index 483a14978..39ffb5500 100644
--- a/tests/pytorch_tests/xquant_tests/test_xquant_end2end.py
+++ b/tests/pytorch_tests/xquant_tests/test_xquant_end2end.py
@@ -34,7 +34,7 @@
 import model_compression_toolkit as mct
 from mct_quantizers import PytorchQuantizationWrapper
 from model_compression_toolkit.constants import PYTORCH
-from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TPC
+from model_compression_toolkit.target_platform_capabilities.constants import IMX500_TP_MODEL
 from model_compression_toolkit.xquant.common.similarity_functions import DEFAULT_SIMILARITY_METRICS_NAMES
 from model_compression_toolkit.xquant.common.xquant_config import XQuantConfig
 from model_compression_toolkit.xquant.pytorch.facade_xquant_report import xquant_report_pytorch_experimental

From aefa143690c485650704ba00787fa7127d0d1148 Mon Sep 17 00:00:00 2001
From: liord <lior.dikstein@altair-semi.com>
Date: Mon, 13 Jan 2025 12:13:34 +0200
Subject: [PATCH 09/18] Remove folder "target_platform"

---
 model_compression_toolkit/__init__.py         |  1 -
 .../core/common/framework_info.py             |  4 +-
 .../core/common/fusion/layer_fusing.py        |  3 +-
 .../core/common/graph/base_graph.py           |  5 +-
 .../core/common/graph/base_node.py            |  4 +-
 .../mixed_precision_candidates_filter.py      |  3 +-
 .../resource_utilization_data.py              |  3 +-
 .../common/pruning/greedy_mask_calculator.py  |  3 +-
 .../common/pruning/mask/per_channel_mask.py   |  1 -
 .../pruning/mask/per_simd_group_mask.py       |  1 -
 .../core/common/pruning/pruner.py             |  4 +-
 .../quantization/filter_nodes_candidates.py   |  3 +-
 .../quantization/quantization_fn_selection.py |  2 +-
 .../quantization_params_fn_selection.py       |  2 +-
 .../error_functions.py                        |  2 +-
 .../power_of_two_selection.py                 |  2 +-
 .../qparams_activations_computation.py        |  2 +-
 .../symmetric_selection.py                    |  2 +-
 .../uniform_selection.py                      |  2 +-
 .../set_node_quantization_config.py           |  3 +-
 .../substitutions/batchnorm_reconstruction.py |  2 +-
 .../substitutions/batchnorm_refusing.py       |  2 +-
 .../shift_negative_activation.py              |  2 +-
 .../core/graph_prep_runner.py                 |  3 +-
 .../core/keras/default_framework_info.py      |  2 +-
 .../configurable_weights_quantizer.py         |  3 +-
 .../keras/resource_utilization_data_facade.py |  5 +-
 .../core/pytorch/default_framework_info.py    |  2 +-
 .../configurable_activation_quantizer.py      |  2 +-
 .../configurable_weights_quantizer.py         |  2 +-
 model_compression_toolkit/core/runner.py      |  3 +-
 .../keras/builder/node_to_quantizer.py        |  2 +-
 .../pytorch/builder/node_to_quantizer.py      |  2 +-
 .../gptq/keras/quantization_facade.py         |  5 +-
 .../soft_rounding/symmetric_soft_quantizer.py |  2 +-
 .../soft_rounding/uniform_soft_quantizer.py   |  2 +-
 .../quantizer/ste_rounding/symmetric_ste.py   |  2 +-
 .../soft_rounding/symmetric_soft_quantizer.py |  2 +-
 .../soft_rounding/uniform_soft_quantizer.py   |  2 +-
 .../quantizer/ste_rounding/symmetric_ste.py   |  2 +-
 model_compression_toolkit/metadata.py         |  3 +-
 .../pruning/keras/pruning_facade.py           |  5 +-
 .../ptq/keras/quantization_facade.py          |  5 +-
 model_compression_toolkit/qat/keras/README.md |  2 +-
 .../qat/keras/quantization_facade.py          |  5 +-
 .../qat/keras/quantizer/lsq/symmetric_lsq.py  |  2 +-
 .../quantizer/ste_rounding/symmetric_ste.py   |  2 +-
 .../qat/pytorch/quantizer/DQA/dqa_uniform.py  |  2 +-
 .../pytorch/quantizer/lsq/symmetric_lsq.py    |  2 +-
 .../qat/pytorch/quantizer/lsq/uniform_lsq.py  |  2 +-
 .../quantizer/ste_rounding/symmetric_ste.py   |  2 +-
 .../quantizer/ste_rounding/uniform_ste.py     |  2 +-
 .../targetplatform2framework/__init__.py      |  6 +--
 .../targetplatform2framework/attach2fw.py     |  4 +-
 .../targetplatform2framework/attach2keras.py  |  6 +--
 .../attach2pytorch.py                         |  4 +-
 .../framework_quantization_capabilities.py    | 10 ++--
 ...ork_quantization_capabilities_component.py |  2 +-
 .../layer_filter_params.py                    |  1 -
 .../operations_to_layers.py                   |  4 +-
 .../tpc_models/imx500_tpc/v1/tpc.py           | 13 +++--
 .../tpc_models/qnnpack_tpc/v1/tpc.py          | 11 ++--
 .../tpc_models/tflite_tpc/v1/tpc.py           | 11 ++--
 .../common/get_quantizers.py                  |  3 +-
 .../common/trainable_quantizer_config.py      |  3 +-
 .../lsq/symmetric_lsq.py                      |  3 +-
 .../keras/config_serialization.py             |  2 +-
 .../xquant/common/model_folding_utils.py      |  3 +-
 .../xquant/keras/keras_report_utils.py        |  4 +-
 .../common_tests/helpers/generate_test_tpc.py | 18 ++++---
 .../helpers/prep_graph_for_func_test.py       |  2 -
 .../helpers/tpcs_for_tests/v1/tpc.py          | 11 ++--
 .../helpers/tpcs_for_tests/v1_lut/tpc.py      | 15 +++---
 .../helpers/tpcs_for_tests/v1_pot/tpc.py      | 11 ++--
 .../helpers/tpcs_for_tests/v2/tpc.py          | 11 ++--
 .../helpers/tpcs_for_tests/v2_lut/tpc.py      | 15 +++---
 .../helpers/tpcs_for_tests/v3/tpc.py          | 13 +++--
 .../helpers/tpcs_for_tests/v3_lut/tpc.py      | 17 +++----
 .../helpers/tpcs_for_tests/v4/tpc.py          | 15 +++---
 tests/common_tests/test_tpc.py                |  1 -
 .../models_tests/test_networks_runner.py      |  1 -
 .../tflite_int8/imx500_int8_tpc.py            | 33 ++++++------
 .../tflite_int8/networks/conv2d_test.py       |  2 +-
 .../tflite_int8/networks/mobilenetv2_test.py  |  2 +-
 .../bn_attributes_quantization_test.py        |  2 -
 .../feature_networks/bn_folding_test.py       |  1 -
 .../const_quantization_test.py                |  3 +-
 .../const_representation_test.py              |  1 -
 .../feature_networks/gptq/gptq_conv.py        |  1 -
 .../feature_networks/gptq/gptq_test.py        |  3 +-
 .../linear_collapsing_test.py                 |  1 -
 .../feature_networks/lut_quantizer.py         |  7 ++-
 .../feature_networks/metadata_test.py         |  1 -
 .../requires_mixed_precision_test.py          |  2 +-
 .../feature_networks/mixed_precision_tests.py |  3 +-
 .../network_editor/edit_qc_test.py            |  4 +-
 .../network_editor/node_filter_test.py        | 11 ++--
 .../feature_networks/qat/qat_test.py          |  8 +--
 .../residual_collapsing_test.py               |  1 -
 .../second_moment_correction_test.py          |  6 +--
 ...ric_threshold_selection_activation_test.py |  3 +-
 .../feature_networks/test_kmeans_quantizer.py | 14 +++---
 .../feature_networks/tpc_test.py              |  1 -
 ...uniform_range_selection_activation_test.py |  3 +-
 .../weights_mixed_precision_tests.py          |  1 -
 .../test_features_runner.py                   |  2 +-
 ...est_activation_quantization_holder_gptq.py |  3 +-
 ...vation_weights_composition_substitution.py |  3 +-
 .../test_cfg_candidates_filter.py             |  3 +-
 .../function_tests/test_custom_layer.py       |  5 +-
 .../function_tests/test_get_gptq_config.py    |  2 +-
 .../test_gptq_soft_quantizer.py               |  3 +-
 .../function_tests/test_graph_max_cut.py      |  1 -
 .../test_hessian_info_calculator.py           |  3 +-
 .../function_tests/test_hessian_service.py    |  2 +-
 .../function_tests/test_hmse_error_method.py  | 19 ++++---
 ...st_kl_error_quantization_configurations.py |  6 +--
 .../function_tests/test_layer_fusing.py       |  7 ++-
 .../test_quant_config_filtering.py            |  2 +-
 .../test_quantization_configurations.py       |  6 +--
 ..._sensitivity_eval_non_suppoerted_output.py |  2 +-
 ...test_sensitivity_metric_interest_points.py |  5 +-
 .../test_set_layer_to_bitwidth.py             |  4 +-
 ...t_symmetric_threshold_selection_weights.py |  4 +-
 .../test_uniform_range_selection_weights.py   |  4 +-
 ...t_weights_activation_split_substitution.py |  5 +-
 .../non_parallel_tests/test_keras_tpc.py      | 50 +++++++++----------
 .../test_lp_search_bitwidth.py                |  2 +-
 .../test_tensorboard_writer.py                |  2 +-
 .../pruning_tests/test_memory_calculator.py   |  2 +-
 tests/keras_tests/tpc_keras.py                |  5 +-
 .../base_keras_trainable_infra_test.py        |  3 +-
 .../test_keras_trainable_infra_runner.py      |  2 +-
 .../test_keras_base_quantizer.py              |  2 +-
 .../function_tests/get_gptq_config_test.py    |  3 +-
 .../function_tests/layer_fusing_test.py       |  3 +-
 .../test_fully_quantized_exporter.py          |  1 -
 .../function_tests/test_function_runner.py    |  2 +-
 .../test_gptq_soft_quantizer.py               |  4 +-
 .../function_tests/test_pytorch_tpc.py        | 13 +++--
 .../test_quantization_configurations.py       |  6 +--
 .../bn_attributes_quantization_test.py        |  1 -
 .../feature_models/compute_max_cut_test.py    |  1 -
 .../feature_models/const_quantization_test.py |  9 ++--
 .../const_representation_test.py              |  1 -
 .../constant_conv_substitution_test.py        |  1 -
 .../feature_models/conv2d_replacement_test.py |  1 -
 .../model_tests/feature_models/gptq_test.py   |  4 +-
 .../feature_models/linear_collapsing_test.py  |  1 -
 .../feature_models/lut_quantizer_test.py      |  5 +-
 .../feature_models/metadata_test.py           |  2 -
 .../mixed_precision_activation_test.py        |  1 -
 .../mixed_precision_weights_test.py           |  3 --
 .../multi_head_attention_test.py              |  1 -
 .../permute_substitution_test.py              |  3 --
 .../model_tests/feature_models/qat_test.py    |  4 +-
 .../feature_models/relu_replacement_test.py   |  2 -
 .../reshape_substitution_test.py              |  3 --
 .../residual_collapsing_test.py               |  2 -
 .../second_moment_correction_test.py          |  5 +-
 .../symmetric_activation_test.py              |  2 +-
 .../model_tests/feature_models/tpc_test.py    |  2 -
 .../feature_models/uniform_activation_test.py |  2 +-
 .../model_tests/test_feature_models_runner.py | 19 ++++---
 tests/pytorch_tests/tpc_pytorch.py            |  2 -
 .../base_pytorch_trainable_infra_test.py      |  2 +-
 .../test_pytorch_trainable_infra_runner.py    |  2 +-
 .../test_pytorch_base_quantizer.py            |  2 +-
 168 files changed, 330 insertions(+), 413 deletions(-)

diff --git a/model_compression_toolkit/__init__.py b/model_compression_toolkit/__init__.py
index 4d45628ef..723328b9b 100644
--- a/model_compression_toolkit/__init__.py
+++ b/model_compression_toolkit/__init__.py
@@ -14,7 +14,6 @@
 # ==============================================================================
 
 from model_compression_toolkit.defaultdict import DefaultDict
-from model_compression_toolkit.target_platform_capabilities import target_platform
 from model_compression_toolkit.target_platform_capabilities.tpc_models.get_target_platform_capabilities import get_target_platform_capabilities
 from model_compression_toolkit import core
 from model_compression_toolkit.logger import set_log_folder
diff --git a/model_compression_toolkit/core/common/framework_info.py b/model_compression_toolkit/core/common/framework_info.py
index 790773d8b..c80ecf4e3 100644
--- a/model_compression_toolkit/core/common/framework_info.py
+++ b/model_compression_toolkit/core/common/framework_info.py
@@ -18,10 +18,8 @@
 from enum import Enum
 from typing import Dict, Any, List
 
-
-
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.defaultdict import DefaultDict
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 
 
 class ChannelAxis(Enum):
diff --git a/model_compression_toolkit/core/common/fusion/layer_fusing.py b/model_compression_toolkit/core/common/fusion/layer_fusing.py
index e76aad1a3..1f2981eb3 100644
--- a/model_compression_toolkit/core/common/fusion/layer_fusing.py
+++ b/model_compression_toolkit/core/common/fusion/layer_fusing.py
@@ -16,7 +16,8 @@
 from typing import Any, List
 from model_compression_toolkit.core.common.graph.base_graph import Graph
 from model_compression_toolkit.core.common.graph.base_node import BaseNode
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \
+    FrameworkQuantizationCapabilities
 from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.layer_filter_params import LayerFilterParams
 
 
diff --git a/model_compression_toolkit/core/common/graph/base_graph.py b/model_compression_toolkit/core/common/graph/base_graph.py
index 2b64787b2..580d5103c 100644
--- a/model_compression_toolkit/core/common/graph/base_graph.py
+++ b/model_compression_toolkit/core/common/graph/base_graph.py
@@ -32,8 +32,9 @@
 from model_compression_toolkit.core.common.pruning.pruning_section import PruningSection
 from model_compression_toolkit.core.common.user_info import UserInformation
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import \
-    FrameworkQuantizationCapabilities, LayerFilterParams
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \
+    FrameworkQuantizationCapabilities
 
 OutTensor = namedtuple('OutTensor', 'node node_out_index')
 
diff --git a/model_compression_toolkit/core/common/graph/base_node.py b/model_compression_toolkit/core/common/graph/base_node.py
index c246fb147..23b1410e2 100644
--- a/model_compression_toolkit/core/common/graph/base_node.py
+++ b/model_compression_toolkit/core/common/graph/base_node.py
@@ -25,7 +25,9 @@
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import QuantizationConfigOptions, \
     OpQuantizationConfig
 from model_compression_toolkit.target_platform_capabilities.schema.schema_functions import max_input_activation_n_bits
-from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities, LayerFilterParams
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \
+    FrameworkQuantizationCapabilities
 
 
 class BaseNode:
diff --git a/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py b/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py
index f0308408b..6a5203886 100644
--- a/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py
+++ b/model_compression_toolkit/core/common/mixed_precision/mixed_precision_candidates_filter.py
@@ -17,7 +17,8 @@
 from model_compression_toolkit.core import ResourceUtilization, FrameworkInfo
 from model_compression_toolkit.core.common import Graph
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \
+    FrameworkQuantizationCapabilities
 
 
 def filter_candidates_for_mixed_precision(graph: Graph,
diff --git a/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py b/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py
index b37067bc9..576a95386 100644
--- a/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py
+++ b/model_compression_toolkit/core/common/mixed_precision/resource_utilization_tools/resource_utilization_data.py
@@ -25,9 +25,10 @@
 from model_compression_toolkit.core.common.framework_implementation import FrameworkImplementation
 from model_compression_toolkit.core.common.graph.edge import EDGE_SINK_INDEX
 from model_compression_toolkit.core.graph_prep_runner import graph_preparation_runner
-from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import QuantizationConfigOptions
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.ru_methods import calc_graph_cuts
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \
+    FrameworkQuantizationCapabilities
 
 
 def compute_resource_utilization_data(in_model: Any,
diff --git a/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py b/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py
index 7ecdedd78..c0632c664 100644
--- a/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py
+++ b/model_compression_toolkit/core/common/pruning/greedy_mask_calculator.py
@@ -24,7 +24,8 @@
 from model_compression_toolkit.core.common.pruning.pruning_framework_implementation import PruningFrameworkImplementation
 from model_compression_toolkit.core.common.pruning.mask.per_simd_group_mask import PerSIMDGroupMask
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \
+    FrameworkQuantizationCapabilities
 
 
 class GreedyMaskCalculator:
diff --git a/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py b/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py
index 2cbf47af5..93b7574d7 100644
--- a/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py
+++ b/model_compression_toolkit/core/common/pruning/mask/per_channel_mask.py
@@ -23,7 +23,6 @@
 from model_compression_toolkit.core.common.pruning.memory_calculator import MemoryCalculator
 from model_compression_toolkit.core.common.pruning.pruning_framework_implementation import PruningFrameworkImplementation
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 
 class MaskIndicator(Enum):
     """
diff --git a/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py b/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py
index 79c03336d..d763463fe 100644
--- a/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py
+++ b/model_compression_toolkit/core/common/pruning/mask/per_simd_group_mask.py
@@ -23,7 +23,6 @@
 from model_compression_toolkit.core.common.pruning.memory_calculator import MemoryCalculator
 from model_compression_toolkit.core.common.pruning.pruning_framework_implementation import PruningFrameworkImplementation
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 
 class PerSIMDGroupMask:
     def __init__(self,
diff --git a/model_compression_toolkit/core/common/pruning/pruner.py b/model_compression_toolkit/core/common/pruning/pruner.py
index 8e2de586a..f2b16a794 100644
--- a/model_compression_toolkit/core/common/pruning/pruner.py
+++ b/model_compression_toolkit/core/common/pruning/pruner.py
@@ -29,7 +29,9 @@
 from model_compression_toolkit.core.common.pruning.pruning_info import PruningInfo, \
     unroll_simd_scores_to_per_channel_scores
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import \
+    FrameworkQuantizationCapabilities
+
 
 class Pruner:
     """
diff --git a/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py b/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py
index 76ef891cc..beba61067 100644
--- a/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py
+++ b/model_compression_toolkit/core/common/quantization/filter_nodes_candidates.py
@@ -15,8 +15,7 @@
 import copy
 from typing import List
 
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
-
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.core.common import Graph, BaseNode
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.core.common.quantization.candidate_node_quantization_config import \
diff --git a/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py b/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py
index 6318fb268..ab45a9891 100644
--- a/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py
+++ b/model_compression_toolkit/core/common/quantization/quantization_fn_selection.py
@@ -16,8 +16,8 @@
 from collections.abc import Callable
 from functools import partial
 
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.core.common.quantization.quantizers.lut_kmeans_quantizer import lut_kmeans_quantizer
 from model_compression_toolkit.core.common.quantization.quantizers.uniform_quantizers import power_of_two_quantizer, \
     symmetric_quantizer, uniform_quantizer
diff --git a/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py b/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py
index 84b790906..88eb152b6 100644
--- a/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py
+++ b/model_compression_toolkit/core/common/quantization/quantization_params_fn_selection.py
@@ -16,8 +16,8 @@
 from collections.abc import Callable
 from functools import partial
 
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.core.common.quantization.quantization_params_generation.lut_kmeans_params import \
     lut_kmeans_tensor, lut_kmeans_histogram
 from model_compression_toolkit.core.common.quantization.quantization_params_generation.symmetric_selection import \
diff --git a/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py b/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py
index 061711b13..69ad9de78 100644
--- a/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py
+++ b/model_compression_toolkit/core/common/quantization/quantization_params_generation/error_functions.py
@@ -16,11 +16,11 @@
 from typing import Tuple, Callable, List, Iterable, Optional
 import numpy as np
 import model_compression_toolkit.core.common.quantization.quantization_config as qc
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.core.common.hessian import HessianScoresRequest, HessianMode, HessianScoresGranularity, \
     HessianInfoService
 from model_compression_toolkit.core.common.similarity_analyzer import compute_mse, compute_mae, compute_lp_norm
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.constants import FLOAT_32, NUM_QPARAM_HESSIAN_SAMPLES
 from model_compression_toolkit.core.common.quantization.quantizers.quantizers_helpers import uniform_quantize_tensor, \
     reshape_tensor_for_per_channel_search
diff --git a/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py b/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py
index 2d2241424..787b70388 100644
--- a/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py
+++ b/model_compression_toolkit/core/common/quantization/quantization_params_generation/power_of_two_selection.py
@@ -16,6 +16,7 @@
 from typing import Union, Tuple, Dict
 
 import model_compression_toolkit.core.common.quantization.quantization_config as qc
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.constants import MIN_THRESHOLD, THRESHOLD, NUM_QPARAM_HESSIAN_SAMPLES, SIGNED
 from model_compression_toolkit.core.common.hessian import HessianInfoService
 from model_compression_toolkit.core.common.quantization.quantization_params_generation.qparams_search import \
@@ -23,7 +24,6 @@
 from model_compression_toolkit.core.common.quantization.quantizers.quantizers_helpers import max_power_of_two, get_tensor_max
 from model_compression_toolkit.core.common.quantization.quantization_params_generation.error_functions import \
     get_threshold_selection_tensor_error_function, get_threshold_selection_histogram_error_function
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.core.common.similarity_analyzer import compute_mse
 from model_compression_toolkit.core.common.quantization.quantizers.quantizers_helpers import quantize_tensor
 
diff --git a/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py b/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py
index ca3d7c733..052074d3c 100644
--- a/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py
+++ b/model_compression_toolkit/core/common/quantization/quantization_params_generation/qparams_activations_computation.py
@@ -15,7 +15,7 @@
 import numpy as np
 from typing import Dict, Union
 
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import Signedness
 from model_compression_toolkit.core.common.collectors.statistics_collector import BaseStatsCollector
 from model_compression_toolkit.core.common.quantization import quantization_params_generation
diff --git a/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py b/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py
index 73cb1077d..524bd863b 100644
--- a/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py
+++ b/model_compression_toolkit/core/common/quantization/quantization_params_generation/symmetric_selection.py
@@ -25,7 +25,7 @@
     qparams_symmetric_selection_histogram_search, kl_qparams_symmetric_selection_histogram_search
 from model_compression_toolkit.core.common.quantization.quantizers.quantizers_helpers import \
     get_tensor_max
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.core.common.similarity_analyzer import compute_mse
 from model_compression_toolkit.core.common.quantization.quantizers.quantizers_helpers import quantize_tensor
 
diff --git a/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py b/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py
index 624c6bd9f..d67834092 100644
--- a/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py
+++ b/model_compression_toolkit/core/common/quantization/quantization_params_generation/uniform_selection.py
@@ -24,7 +24,7 @@
     get_threshold_selection_tensor_error_function, get_threshold_selection_histogram_error_function
 from model_compression_toolkit.core.common.quantization.quantizers.quantizers_helpers import get_tensor_max, \
     get_tensor_min
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.core.common.similarity_analyzer import compute_mse
 from model_compression_toolkit.core.common.quantization.quantizers.quantizers_helpers import uniform_quantize_tensor
 
diff --git a/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py b/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py
index d83e9e96f..7359cdf1c 100644
--- a/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py
+++ b/model_compression_toolkit/core/common/quantization/set_node_quantization_config.py
@@ -33,9 +33,10 @@
 from model_compression_toolkit.core.common.quantization.quantization_fn_selection import \
     get_weights_quantization_fn
 from model_compression_toolkit.target_platform_capabilities.schema.schema_functions import max_input_activation_n_bits
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import OpQuantizationConfig, \
     QuantizationConfigOptions
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \
+    FrameworkQuantizationCapabilities
 
 
 def set_quantization_configuration_to_graph(graph: Graph,
diff --git a/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py b/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py
index 256501365..1f17263a4 100644
--- a/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py
+++ b/model_compression_toolkit/core/common/substitutions/batchnorm_reconstruction.py
@@ -26,7 +26,7 @@
 from model_compression_toolkit.core.common.graph.base_graph import Graph
 from model_compression_toolkit.core.common.graph.base_node import BaseNode
 from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import AttributeQuantizationConfig
 
 
diff --git a/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py b/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py
index 1ce52016b..f49af59e7 100644
--- a/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py
+++ b/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py
@@ -22,7 +22,7 @@
 from model_compression_toolkit.core.common.graph.base_graph import Graph
 from model_compression_toolkit.core.common.graph.graph_matchers import EdgeMatcher, NodeOperationMatcher
 from model_compression_toolkit.core.common.graph.base_node import BaseNode
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.constants import THRESHOLD, RANGE_MIN, RANGE_MAX
 from model_compression_toolkit.logger import Logger
 
diff --git a/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py b/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py
index 1db9fce20..7909cf1a6 100644
--- a/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py
+++ b/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py
@@ -22,7 +22,7 @@
 from model_compression_toolkit.core.common import FrameworkInfo, Graph, BaseNode
 from model_compression_toolkit.constants import THRESHOLD, SIGNED, SHIFT_NEGATIVE_NON_LINEAR_NUM_BITS
 from model_compression_toolkit.core.common.graph.graph_matchers import NodeOperationMatcher
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import AttributeQuantizationConfig
 from model_compression_toolkit.core.common.quantization.set_node_quantization_config import create_node_activation_qc, \
     set_quantization_configs_to_node
diff --git a/model_compression_toolkit/core/graph_prep_runner.py b/model_compression_toolkit/core/graph_prep_runner.py
index 3f9027330..78d543f15 100644
--- a/model_compression_toolkit/core/graph_prep_runner.py
+++ b/model_compression_toolkit/core/graph_prep_runner.py
@@ -29,8 +29,9 @@
 from model_compression_toolkit.core.common.substitutions.apply_substitutions import substitute
 from model_compression_toolkit.core.common.substitutions.linear_collapsing_substitution import \
     linear_collapsing_substitute
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.common.visualization.tensorboard_writer import TensorboardWriter
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \
+    FrameworkQuantizationCapabilities
 
 
 def graph_preparation_runner(in_model: Any,
diff --git a/model_compression_toolkit/core/keras/default_framework_info.py b/model_compression_toolkit/core/keras/default_framework_info.py
index d26efed71..7cc9990f0 100644
--- a/model_compression_toolkit/core/keras/default_framework_info.py
+++ b/model_compression_toolkit/core/keras/default_framework_info.py
@@ -26,7 +26,7 @@
 
 from model_compression_toolkit.defaultdict import DefaultDict
 from model_compression_toolkit.core.common.framework_info import FrameworkInfo
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.constants import SOFTMAX_THRESHOLD
 from model_compression_toolkit.core.keras.constants import SOFTMAX, LINEAR, RELU, SWISH, SIGMOID, IDENTITY, TANH, SELU, \
     KERNEL, DEPTHWISE_KERNEL, GELU
diff --git a/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py b/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py
index 704066718..a71af6feb 100644
--- a/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py
+++ b/model_compression_toolkit/core/keras/mixed_precision/configurable_weights_quantizer.py
@@ -20,8 +20,7 @@
 from model_compression_toolkit.core.common.quantization.candidate_node_quantization_config import \
     CandidateNodeQuantizationConfig
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
-from mct_quantizers import QuantizationTarget
+from mct_quantizers import QuantizationTarget, QuantizationMethod
 from mct_quantizers import mark_quantizer
 
 import tensorflow as tf
diff --git a/model_compression_toolkit/core/keras/resource_utilization_data_facade.py b/model_compression_toolkit/core/keras/resource_utilization_data_facade.py
index 493007d44..87b4a06db 100644
--- a/model_compression_toolkit/core/keras/resource_utilization_data_facade.py
+++ b/model_compression_toolkit/core/keras/resource_utilization_data_facade.py
@@ -19,8 +19,9 @@
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.constants import TENSORFLOW
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
-from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization_data import compute_resource_utilization_data
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
+    AttachTpcToKeras
 from model_compression_toolkit.verify_packages import FOUND_TF
 
 if FOUND_TF:
@@ -28,8 +29,6 @@
     from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
     from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
     from tensorflow.keras.models import Model
-    from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
-        AttachTpcToKeras
 
     from model_compression_toolkit import get_target_platform_capabilities
 
diff --git a/model_compression_toolkit/core/pytorch/default_framework_info.py b/model_compression_toolkit/core/pytorch/default_framework_info.py
index f3d965182..93997bb88 100644
--- a/model_compression_toolkit/core/pytorch/default_framework_info.py
+++ b/model_compression_toolkit/core/pytorch/default_framework_info.py
@@ -19,7 +19,7 @@
 
 from model_compression_toolkit.defaultdict import DefaultDict
 from model_compression_toolkit.core.common.framework_info import FrameworkInfo
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.constants import SOFTMAX_THRESHOLD
 from model_compression_toolkit.core.pytorch.constants import KERNEL
 from model_compression_toolkit.core.pytorch.quantizer.fake_quant_builder import power_of_two_quantization, \
diff --git a/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py b/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py
index 809256a74..4164cf60f 100644
--- a/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py
+++ b/model_compression_toolkit/core/pytorch/mixed_precision/configurable_activation_quantizer.py
@@ -21,7 +21,7 @@
 from model_compression_toolkit.core.common.quantization.candidate_node_quantization_config import \
     CandidateNodeQuantizationConfig
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from mct_quantizers import QuantizationTarget
 from mct_quantizers import mark_quantizer
 
diff --git a/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py b/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py
index 6feb5e77d..8b0d2c177 100644
--- a/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py
+++ b/model_compression_toolkit/core/pytorch/mixed_precision/configurable_weights_quantizer.py
@@ -20,7 +20,7 @@
 from model_compression_toolkit.core.common.quantization.candidate_node_quantization_config import \
     CandidateNodeQuantizationConfig
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from mct_quantizers import QuantizationTarget
 
 from mct_quantizers import mark_quantizer
diff --git a/model_compression_toolkit/core/runner.py b/model_compression_toolkit/core/runner.py
index 1f3b080e6..f3b71668a 100644
--- a/model_compression_toolkit/core/runner.py
+++ b/model_compression_toolkit/core/runner.py
@@ -44,12 +44,13 @@
 from model_compression_toolkit.core.common.mixed_precision.mixed_precision_search_facade import search_bit_width
 from model_compression_toolkit.core.common.network_editors.edit_network import edit_network_graph
 from model_compression_toolkit.core.common.quantization.core_config import CoreConfig
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.common.visualization.final_config_visualizer import \
     WeightsFinalBitwidthConfigVisualizer, \
     ActivationFinalBitwidthConfigVisualizer
 from model_compression_toolkit.core.common.visualization.tensorboard_writer import TensorboardWriter, \
     finalize_bitwidth_in_tb
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \
+    FrameworkQuantizationCapabilities
 
 
 def core_runner(in_model: Any,
diff --git a/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py b/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py
index 68b464d57..e7d1fa303 100644
--- a/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py
+++ b/model_compression_toolkit/exporter/model_wrapper/keras/builder/node_to_quantizer.py
@@ -20,7 +20,7 @@
     NodeWeightsQuantizationConfig, NodeActivationQuantizationConfig
 
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from mct_quantizers import QuantizationTarget
 from mct_quantizers.common.get_quantizers import get_inferable_quantizer_class
 from mct_quantizers.keras.quantizers import BaseKerasInferableQuantizer
diff --git a/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py b/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py
index 9db12cd2a..e17131b28 100644
--- a/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py
+++ b/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/node_to_quantizer.py
@@ -21,7 +21,7 @@
 from model_compression_toolkit.core.common.quantization.node_quantization_config import BaseNodeQuantizationConfig, \
     NodeWeightsQuantizationConfig, NodeActivationQuantizationConfig
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from mct_quantizers import QuantizationTarget
 from mct_quantizers.common.get_quantizers import get_inferable_quantizer_class
 from mct_quantizers import \
diff --git a/model_compression_toolkit/gptq/keras/quantization_facade.py b/model_compression_toolkit/gptq/keras/quantization_facade.py
index 2da9b0bc6..a32cf6556 100644
--- a/model_compression_toolkit/gptq/keras/quantization_facade.py
+++ b/model_compression_toolkit/gptq/keras/quantization_facade.py
@@ -23,6 +23,8 @@
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.constants import TENSORFLOW, ACT_HESSIAN_DEFAULT_BATCH_SIZE, GPTQ_HESSIAN_NUM_SAMPLES
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
+    AttachTpcToKeras
 from model_compression_toolkit.verify_packages import FOUND_TF
 from model_compression_toolkit.core.common.user_info import UserInformation
 from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig, GPTQHessianScoresConfig, \
@@ -33,7 +35,6 @@
 from model_compression_toolkit.core.runner import core_runner
 from model_compression_toolkit.gptq.runner import gptq_runner
 from model_compression_toolkit.core.analyzer import analyzer_model_quantization
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities
 from model_compression_toolkit.metadata import create_model_metadata
 
 
@@ -48,8 +49,6 @@
     from model_compression_toolkit.exporter.model_wrapper import get_exportable_keras_model
     from model_compression_toolkit import get_target_platform_capabilities
     from mct_quantizers.keras.metadata import add_metadata
-    from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
-        AttachTpcToKeras
 
     # As from TF2.9 optimizers package is changed
     if version.parse(tf.__version__) < version.parse("2.9"):
diff --git a/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py b/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py
index e3fb198fe..4900e795e 100644
--- a/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py
+++ b/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/symmetric_soft_quantizer.py
@@ -18,7 +18,7 @@
 
 from model_compression_toolkit.gptq import RoundingType
 from model_compression_toolkit.core.common import max_power_of_two
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from mct_quantizers import QuantizationTarget
 from model_compression_toolkit.gptq.common.gptq_constants import PTQ_THRESHOLD, SCALE_PTQ, \
     SOFT_ROUNDING_GAMMA, SOFT_ROUNDING_ZETA, AUXVAR
diff --git a/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py b/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py
index 2d5f8d7b9..0445e9d1a 100644
--- a/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py
+++ b/model_compression_toolkit/gptq/keras/quantizer/soft_rounding/uniform_soft_quantizer.py
@@ -18,7 +18,7 @@
 
 from model_compression_toolkit.gptq import RoundingType
 from model_compression_toolkit.trainable_infrastructure.common.constants import FQ_MIN, FQ_MAX
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from mct_quantizers import QuantizationTarget
 from model_compression_toolkit.gptq.common.gptq_constants import \
     SOFT_ROUNDING_GAMMA, SOFT_ROUNDING_ZETA, AUXVAR
diff --git a/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py b/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py
index 2ed2d6d71..c39721a60 100644
--- a/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py
+++ b/model_compression_toolkit/gptq/keras/quantizer/ste_rounding/symmetric_ste.py
@@ -19,7 +19,7 @@
 import tensorflow as tf
 
 from model_compression_toolkit.gptq import RoundingType
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from mct_quantizers import QuantizationTarget
 from model_compression_toolkit.gptq.common.gptq_constants import AUXVAR, PTQ_THRESHOLD
 from model_compression_toolkit.gptq.keras.quantizer import quant_utils as qutils
diff --git a/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py b/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py
index 4724c2036..a4a48f7e2 100644
--- a/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py
+++ b/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/symmetric_soft_quantizer.py
@@ -18,7 +18,7 @@
 import numpy as np
 
 from model_compression_toolkit.core.common import max_power_of_two
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from mct_quantizers import QuantizationTarget, PytorchQuantizationWrapper
 from model_compression_toolkit.gptq.common.gptq_config import RoundingType
 from model_compression_toolkit.gptq.pytorch.quantizer.base_pytorch_gptq_quantizer import \
diff --git a/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py b/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py
index 8d68fb465..9f63ecffa 100644
--- a/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py
+++ b/model_compression_toolkit/gptq/pytorch/quantizer/soft_rounding/uniform_soft_quantizer.py
@@ -18,7 +18,7 @@
 import numpy as np
 
 from model_compression_toolkit.trainable_infrastructure.common.constants import FQ_MIN, FQ_MAX
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from mct_quantizers import QuantizationTarget, PytorchQuantizationWrapper
 from model_compression_toolkit.gptq.common.gptq_config import RoundingType
 from model_compression_toolkit.gptq.pytorch.quantizer.base_pytorch_gptq_quantizer import \
diff --git a/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py b/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py
index 38e2041e0..79ed406c5 100644
--- a/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py
+++ b/model_compression_toolkit/gptq/pytorch/quantizer/ste_rounding/symmetric_ste.py
@@ -18,7 +18,7 @@
 import numpy as np
 from model_compression_toolkit.defaultdict import DefaultDict
 
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from mct_quantizers import QuantizationTarget, PytorchQuantizationWrapper
 from model_compression_toolkit.gptq.common.gptq_config import RoundingType
 from model_compression_toolkit.gptq.pytorch.quantizer.base_pytorch_gptq_quantizer import \
diff --git a/model_compression_toolkit/metadata.py b/model_compression_toolkit/metadata.py
index f223c5f29..c200d2036 100644
--- a/model_compression_toolkit/metadata.py
+++ b/model_compression_toolkit/metadata.py
@@ -18,7 +18,8 @@
 from model_compression_toolkit.constants import OPERATORS_SCHEDULING, FUSED_NODES_MAPPING, CUTS, MAX_CUT, OP_ORDER, \
     OP_RECORD, SHAPE, NODE_OUTPUT_INDEX, NODE_NAME, TOTAL_SIZE, MEM_ELEMENTS
 from model_compression_toolkit.core.common.graph.memory_graph.compute_graph_max_cut import SchedulerInfo
-from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \
+    FrameworkQuantizationCapabilities
 
 
 def create_model_metadata(fqc: FrameworkQuantizationCapabilities,
diff --git a/model_compression_toolkit/pruning/keras/pruning_facade.py b/model_compression_toolkit/pruning/keras/pruning_facade.py
index beda9c3f2..b6e3cdc0b 100644
--- a/model_compression_toolkit/pruning/keras/pruning_facade.py
+++ b/model_compression_toolkit/pruning/keras/pruning_facade.py
@@ -18,6 +18,8 @@
 from model_compression_toolkit import get_target_platform_capabilities
 from model_compression_toolkit.constants import TENSORFLOW
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
+    AttachTpcToKeras
 from model_compression_toolkit.verify_packages import FOUND_TF
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization
 from model_compression_toolkit.core.common.pruning.pruner import Pruner
@@ -26,7 +28,6 @@
 from model_compression_toolkit.core.common.quantization.set_node_quantization_config import set_quantization_configuration_to_graph
 from model_compression_toolkit.core.graph_prep_runner import read_model_to_graph
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.common.quantization.quantization_config import DEFAULTCONFIG
 from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 
@@ -35,8 +36,6 @@
     from model_compression_toolkit.core.keras.pruning.pruning_keras_implementation import PruningKerasImplementation
     from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
     from tensorflow.keras.models import Model
-    from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
-        AttachTpcToKeras
 
     DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
 
diff --git a/model_compression_toolkit/ptq/keras/quantization_facade.py b/model_compression_toolkit/ptq/keras/quantization_facade.py
index 3f0e960b1..3c93b8f37 100644
--- a/model_compression_toolkit/ptq/keras/quantization_facade.py
+++ b/model_compression_toolkit/ptq/keras/quantization_facade.py
@@ -23,11 +23,12 @@
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.constants import TENSORFLOW
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
+    AttachTpcToKeras
 from model_compression_toolkit.verify_packages import FOUND_TF
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization
 from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \
     MixedPrecisionQuantizationConfig
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.runner import core_runner
 from model_compression_toolkit.ptq.runner import ptq_runner
 from model_compression_toolkit.metadata import create_model_metadata
@@ -42,8 +43,6 @@
 
     from model_compression_toolkit import get_target_platform_capabilities
     from mct_quantizers.keras.metadata import add_metadata
-    from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
-        AttachTpcToKeras
 
     DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
 
diff --git a/model_compression_toolkit/qat/keras/README.md b/model_compression_toolkit/qat/keras/README.md
index f1cf6593e..42c096bea 100644
--- a/model_compression_toolkit/qat/keras/README.md
+++ b/model_compression_toolkit/qat/keras/README.md
@@ -72,7 +72,7 @@ import tensorflow as tf
 
 from mct_quantizers.keras.quantizers import ActivationUniformInferableQuantizer
 from model_compression_toolkit.trainable_infrastructure import TrainingMethod, TrainableQuantizerActivationConfig
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from mct_quantizers import mark_quantizer, QuantizationTarget
 from model_compression_toolkit.qat.keras.quantizer.base_keras_qat_weight_quantizer import BaseKerasQATWeightTrainableQuantizer
 
diff --git a/model_compression_toolkit/qat/keras/quantization_facade.py b/model_compression_toolkit/qat/keras/quantization_facade.py
index a38600132..9480c018f 100644
--- a/model_compression_toolkit/qat/keras/quantization_facade.py
+++ b/model_compression_toolkit/qat/keras/quantization_facade.py
@@ -20,12 +20,13 @@
 from model_compression_toolkit.core.common.visualization.tensorboard_writer import init_tensorboard_writer
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
+    AttachTpcToKeras
 from model_compression_toolkit.verify_packages import FOUND_TF
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization
 from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \
     MixedPrecisionQuantizationConfig
 from mct_quantizers import KerasActivationQuantizationHolder
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.runner import core_runner
 from model_compression_toolkit.ptq.runner import ptq_runner
 
@@ -55,8 +56,6 @@
     from model_compression_toolkit.qat.keras.quantizer.quantization_builder import quantization_builder, \
     get_activation_quantizer_holder
     from model_compression_toolkit.qat.common.qat_config import QATConfig
-    from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
-        AttachTpcToKeras
 
     DEFAULT_KERAS_TPC = get_target_platform_capabilities(TENSORFLOW, DEFAULT_TP_MODEL)
 
diff --git a/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py b/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py
index 07413b85c..5966bb146 100644
--- a/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py
+++ b/model_compression_toolkit/qat/keras/quantizer/lsq/symmetric_lsq.py
@@ -21,7 +21,7 @@
 
 from model_compression_toolkit.trainable_infrastructure import TrainingMethod
 
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.trainable_infrastructure import KerasTrainableQuantizationWrapper
 from mct_quantizers import QuantizationTarget, mark_quantizer
 from model_compression_toolkit.qat.common import THRESHOLD_TENSOR
diff --git a/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py b/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py
index d41f04c19..80f03d6d5 100644
--- a/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py
+++ b/model_compression_toolkit/qat/keras/quantizer/ste_rounding/symmetric_ste.py
@@ -22,7 +22,7 @@
 
 from model_compression_toolkit.trainable_infrastructure import TrainingMethod
 
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.trainable_infrastructure import KerasTrainableQuantizationWrapper
 from mct_quantizers import QuantizationTarget, mark_quantizer
 from model_compression_toolkit.qat.common import THRESHOLD_TENSOR
diff --git a/model_compression_toolkit/qat/pytorch/quantizer/DQA/dqa_uniform.py b/model_compression_toolkit/qat/pytorch/quantizer/DQA/dqa_uniform.py
index 931fd53b5..f87dbad22 100755
--- a/model_compression_toolkit/qat/pytorch/quantizer/DQA/dqa_uniform.py
+++ b/model_compression_toolkit/qat/pytorch/quantizer/DQA/dqa_uniform.py
@@ -20,7 +20,7 @@
 from mct_quantizers.pytorch.quantizers import WeightsUniformInferableQuantizer
 from torch import Tensor
 
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from mct_quantizers import QuantizationTarget, PytorchQuantizationWrapper, mark_quantizer
 from model_compression_toolkit.constants import RANGE_MAX, RANGE_MIN
 from model_compression_toolkit.trainable_infrastructure import TrainingMethod
diff --git a/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py b/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py
index 7a4f91ec0..250f6d872 100644
--- a/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py
+++ b/model_compression_toolkit/qat/pytorch/quantizer/lsq/symmetric_lsq.py
@@ -18,7 +18,7 @@
 import torch
 import torch.nn as nn
 
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from mct_quantizers import PytorchQuantizationWrapper
 from model_compression_toolkit.qat.common import THRESHOLD_TENSOR
 from model_compression_toolkit import constants as C
diff --git a/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py b/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py
index 20bd84b56..24acaf30e 100644
--- a/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py
+++ b/model_compression_toolkit/qat/pytorch/quantizer/lsq/uniform_lsq.py
@@ -28,7 +28,7 @@
 from model_compression_toolkit.trainable_infrastructure.common.base_trainable_quantizer import VariableGroup
 from model_compression_toolkit.trainable_infrastructure.common.trainable_quantizer_config import \
     TrainableQuantizerWeightsConfig
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor
 from model_compression_toolkit.core.common.quantization.quantizers.quantizers_helpers import fix_range_to_include_zero
 from model_compression_toolkit.qat.pytorch.quantizer.base_pytorch_qat_weight_quantizer import BasePytorchQATWeightTrainableQuantizer
diff --git a/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py b/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py
index 117a1ca6b..945ad65b5 100644
--- a/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py
+++ b/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/symmetric_ste.py
@@ -18,7 +18,7 @@
 import torch
 import torch.nn as nn
 
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from mct_quantizers import PytorchQuantizationWrapper
 from model_compression_toolkit.qat.common import THRESHOLD_TENSOR
 from model_compression_toolkit import constants as C
diff --git a/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py b/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py
index 8e4675841..069ce3290 100644
--- a/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py
+++ b/model_compression_toolkit/qat/pytorch/quantizer/ste_rounding/uniform_ste.py
@@ -20,7 +20,7 @@
 from model_compression_toolkit.constants import RANGE_MAX, RANGE_MIN
 from model_compression_toolkit.trainable_infrastructure.common.constants import FQ_MIN, FQ_MAX
 
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from mct_quantizers import QuantizationTarget, PytorchQuantizationWrapper
 
 from model_compression_toolkit.qat.pytorch.quantizer.base_pytorch_qat_weight_quantizer import BasePytorchQATWeightTrainableQuantizer
diff --git a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/__init__.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/__init__.py
index b3ee5dff4..02e60eb6a 100644
--- a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/__init__.py
+++ b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/__init__.py
@@ -13,11 +13,11 @@
 # limitations under the License.
 # ==============================================================================
 
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.current_tpc import get_current_tpc
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.framework_quantization_capabilities import FrameworkQuantizationCapabilities
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.current_tpc import get_current_tpc
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import FrameworkQuantizationCapabilities
 from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.layer_filter_params import \
     LayerFilterParams
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.operations_to_layers import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.operations_to_layers import \
     OperationsToLayers, OperationsSetToLayers
 
 
diff --git a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2fw.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2fw.py
index f3505c5ef..f2ab071ad 100644
--- a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2fw.py
+++ b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2fw.py
@@ -3,10 +3,10 @@
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
     OperatorsSet
-from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities, \
-    OperationsSetToLayers
 
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import \
+    FrameworkQuantizationCapabilities, OperationsSetToLayers
 
 
 class AttachTpcToFramework:
diff --git a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2keras.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2keras.py
index 2f1157868..e4f022ec7 100644
--- a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2keras.py
+++ b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2keras.py
@@ -16,6 +16,9 @@
 import tensorflow as tf
 from packaging import version
 
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2fw import \
+    AttachTpcToFramework
 from model_compression_toolkit.verify_packages import FOUND_SONY_CUSTOM_LAYERS
 
 if FOUND_SONY_CUSTOM_LAYERS:
@@ -34,9 +37,6 @@
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS, \
     BIAS_ATTR, KERAS_KERNEL, KERAS_DEPTHWISE_KERNEL
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import OperatorSetNames
-from model_compression_toolkit.target_platform_capabilities.target_platform import LayerFilterParams
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2fw import \
-    AttachTpcToFramework
 
 
 class AttachTpcToKeras(AttachTpcToFramework):
diff --git a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2pytorch.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2pytorch.py
index fe94ea2a4..e7347cd81 100644
--- a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2pytorch.py
+++ b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2pytorch.py
@@ -28,8 +28,8 @@
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, PYTORCH_KERNEL, BIAS, \
     BIAS_ATTR
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import OperatorSetNames
-from model_compression_toolkit.target_platform_capabilities.target_platform import LayerFilterParams, Eq
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2fw import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2fw import \
     AttachTpcToFramework
 
 
diff --git a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities.py
index e5af3d2e7..ecc200bd5 100644
--- a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities.py
+++ b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities.py
@@ -21,15 +21,15 @@
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.target_platform_capabilities.schema.schema_functions import \
     get_config_options_by_operators_set, get_default_op_quantization_config, get_opset_by_name
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.operations_to_layers import \
-    OperationsToLayers, OperationsSetToLayers
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.framework_quantization_capabilities_component import FrameworkQuantizationCapabilitiesComponent
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.operations_to_layers import OperationsToLayers, \
+    OperationsSetToLayers
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities_component import \
+    FrameworkQuantizationCapabilitiesComponent
 from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.layer_filter_params import LayerFilterParams
 from model_compression_toolkit.target_platform_capabilities.immutable import ImmutableClass
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, OperatorsSetBase, \
     OpQuantizationConfig, QuantizationConfigOptions
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.current_tpc import _current_tpc
-
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.current_tpc import _current_tpc
 
 class FrameworkQuantizationCapabilities(ImmutableClass):
     """
diff --git a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities_component.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities_component.py
index 6341a0e69..54011d7f9 100644
--- a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities_component.py
+++ b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/framework_quantization_capabilities_component.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 # ==============================================================================
 
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.current_tpc import  _current_tpc
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.current_tpc import _current_tpc
 
 
 class FrameworkQuantizationCapabilitiesComponent:
diff --git a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/layer_filter_params.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/layer_filter_params.py
index 30e032635..7335d1099 100644
--- a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/layer_filter_params.py
+++ b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/layer_filter_params.py
@@ -14,7 +14,6 @@
 # ==============================================================================
 
 from typing import Any
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attribute_filter import AttributeFilter
 
 
 class LayerFilterParams:
diff --git a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/operations_to_layers.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/operations_to_layers.py
index be1f57190..6b81a10a7 100644
--- a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/operations_to_layers.py
+++ b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/operations_to_layers.py
@@ -18,8 +18,8 @@
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.target_platform_capabilities.schema.schema_functions import \
     get_config_options_by_operators_set, is_opset_in_model
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.current_tpc import  _current_tpc
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.framework_quantization_capabilities_component import FrameworkQuantizationCapabilitiesComponent
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.current_tpc import _current_tpc
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities_component import FrameworkQuantizationCapabilitiesComponent
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import OperatorsSetBase, OperatorSetGroup
 from model_compression_toolkit import DefaultDict
 
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc.py
index b4e6f273d..de6e953de 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc.py
@@ -16,14 +16,13 @@
 
 import model_compression_toolkit as mct
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
     IMX500_TP_MODEL
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
-tp = mct.target_platform
-
 
 def get_tpc() -> TargetPlatformCapabilities:
     """
@@ -60,7 +59,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=8,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -69,7 +68,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # define a quantization config to quantize the kernel (for layers where there is a kernel attribute).
     kernel_base_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=8,
         weights_per_channel_threshold=True,
         enable_weights_quantization=True,
@@ -77,7 +76,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # define a quantization config to quantize the bias (for layers where there is a bias attribute).
     bias_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=FLOAT_BITWIDTH,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -92,7 +91,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     eight_bits_default = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -106,7 +105,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     linear_eight_bits = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config, BIAS_ATTR: bias_config},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py
index 979febe5d..93bc46c1e 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py
@@ -22,7 +22,6 @@
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
-tp = mct.target_platform
 
 
 def get_tpc() -> TargetPlatformCapabilities:
@@ -55,7 +54,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=8,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -63,7 +62,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define a quantization config to quantize the kernel (for layers where there is a kernel attribute).
     kernel_base_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=8,
         weights_per_channel_threshold=False,
         enable_weights_quantization=True,
@@ -71,7 +70,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define a quantization config to quantize the bias (for layers where there is a bias attribute).
     bias_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=FLOAT_BITWIDTH,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -88,7 +87,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     eight_bits_default = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -100,7 +99,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define an 8-bit config for linear operations quantization, that include a kernel and bias attributes.
     linear_eight_bits = schema.OpQuantizationConfig(
-        activation_quantization_method=tp.QuantizationMethod.UNIFORM,
+        activation_quantization_method=QuantizationMethod.UNIFORM,
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config, BIAS_ATTR: bias_config},
         activation_n_bits=8,
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py
index 8e00a048d..7d4a6048f 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py
@@ -21,7 +21,6 @@
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
-tp = mct.target_platform
 
 
 def get_tpc() -> TargetPlatformCapabilities:
@@ -54,7 +53,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=8,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -62,7 +61,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define a quantization config to quantize the kernel (for layers where there is a kernel attribute).
     kernel_base_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=8,
         weights_per_channel_threshold=True,
         enable_weights_quantization=True,
@@ -70,7 +69,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define a quantization config to quantize the bias (for layers where there is a bias attribute).
     bias_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=FLOAT_BITWIDTH,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -85,7 +84,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     eight_bits_default = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -97,7 +96,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define an 8-bit config for linear operations quantization, that include a kernel and bias attributes.
     linear_eight_bits = schema.OpQuantizationConfig(
-        activation_quantization_method=tp.QuantizationMethod.UNIFORM,
+        activation_quantization_method=QuantizationMethod.UNIFORM,
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config, BIAS_ATTR: bias_config},
         activation_n_bits=8,
diff --git a/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py b/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py
index 7f586db37..afb312fe9 100644
--- a/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py
+++ b/model_compression_toolkit/trainable_infrastructure/common/get_quantizers.py
@@ -15,8 +15,7 @@
 from typing import Union, Any
 
 from model_compression_toolkit.logger import Logger
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
-from mct_quantizers import QuantizationTarget
+from mct_quantizers import QuantizationTarget, QuantizationMethod
 from mct_quantizers.common.constants \
     import QUANTIZATION_TARGET, QUANTIZATION_METHOD, QUANTIZER_ID
 from mct_quantizers.common.get_all_subclasses \
diff --git a/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py b/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py
index 3eec72942..18bb612ae 100644
--- a/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py
+++ b/model_compression_toolkit/trainable_infrastructure/common/trainable_quantizer_config.py
@@ -14,7 +14,8 @@
 # ==============================================================================
 from abc import ABC
 from typing import Dict, List
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+
+from mct_quantizers import QuantizationMethod
 
 
 class TrainableQuantizerCandidateConfig:
diff --git a/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/symmetric_lsq.py b/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/symmetric_lsq.py
index 1be5f2a94..f21f96721 100644
--- a/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/symmetric_lsq.py
+++ b/model_compression_toolkit/trainable_infrastructure/keras/activation_quantizers/lsq/symmetric_lsq.py
@@ -23,9 +23,8 @@
 
 from model_compression_toolkit.trainable_infrastructure import TrainingMethod
 
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.trainable_infrastructure import KerasTrainableQuantizationWrapper
-from mct_quantizers import QuantizationTarget, mark_quantizer
+from mct_quantizers import QuantizationTarget, mark_quantizer, QuantizationMethod
 from model_compression_toolkit.qat.common import THRESHOLD_TENSOR
 from model_compression_toolkit import constants as C
 
diff --git a/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py b/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py
index e0725cea3..0d2beafdd 100644
--- a/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py
+++ b/model_compression_toolkit/trainable_infrastructure/keras/config_serialization.py
@@ -19,7 +19,7 @@
 
 import numpy as np
 
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.trainable_infrastructure.common.trainable_quantizer_config import \
     TrainableQuantizerActivationConfig, TrainableQuantizerWeightsConfig
 from mct_quantizers.common import constants as C
diff --git a/model_compression_toolkit/xquant/common/model_folding_utils.py b/model_compression_toolkit/xquant/common/model_folding_utils.py
index 8e923379e..63af169a9 100644
--- a/model_compression_toolkit/xquant/common/model_folding_utils.py
+++ b/model_compression_toolkit/xquant/common/model_folding_utils.py
@@ -23,7 +23,8 @@
 from typing import Any, Callable
 
 from model_compression_toolkit.core.common import Graph
-from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import \
+    FrameworkQuantizationCapabilities
 
 
 class ModelFoldingUtils:
diff --git a/model_compression_toolkit/xquant/keras/keras_report_utils.py b/model_compression_toolkit/xquant/keras/keras_report_utils.py
index 73c249b6f..42ba4652e 100644
--- a/model_compression_toolkit/xquant/keras/keras_report_utils.py
+++ b/model_compression_toolkit/xquant/keras/keras_report_utils.py
@@ -17,6 +17,8 @@
 from model_compression_toolkit.constants import TENSORFLOW
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
 from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
+    AttachTpcToKeras
 from model_compression_toolkit.xquant.common.framework_report_utils import FrameworkReportUtils
 from model_compression_toolkit.xquant.common.model_folding_utils import ModelFoldingUtils
 from model_compression_toolkit.xquant.common.similarity_calculator import SimilarityCalculator
@@ -27,8 +29,6 @@
 from model_compression_toolkit.xquant.keras.tensorboard_utils import KerasTensorboardUtils
 from mct_quantizers.keras.metadata import get_metadata
 from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
-    AttachTpcToKeras
 
 
 class KerasReportUtils(FrameworkReportUtils):
diff --git a/tests/common_tests/helpers/generate_test_tpc.py b/tests/common_tests/helpers/generate_test_tpc.py
index 2b6276514..bb95c2e31 100644
--- a/tests/common_tests/helpers/generate_test_tpc.py
+++ b/tests/common_tests/helpers/generate_test_tpc.py
@@ -16,16 +16,18 @@
 from typing import Dict, List, Any
 
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.constants import FLOAT_BITWIDTH, ACTIVATION_N_BITS_ATTRIBUTE, \
     SUPPORTED_INPUT_ACTIVATION_NBITS_ATTRIBUTE
 from model_compression_toolkit.target_platform_capabilities.constants import OPS_SET_LIST, KERNEL_ATTR, BIAS_ATTR, \
     WEIGHTS_N_BITS
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import Signedness, OpQuantizationConfig, \
     QuantizationConfigOptions
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import \
+    FrameworkQuantizationCapabilities
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_op_quantization_configs, generate_tpc
 import model_compression_toolkit as mct
 
-tp = mct.target_platform
 
 DEFAULT_WEIGHT_ATTR_CONFIG = 'default_weight_attr_config'
 KERNEL_BASE_CONFIG = 'kernel_base_config'
@@ -168,7 +170,7 @@ def generate_custom_test_tpc(name: str,
 
 def generate_test_fqc(name: str,
                       tpc: schema.TargetPlatformCapabilities,
-                      base_fqc: tp.FrameworkQuantizationCapabilities,
+                      base_fqc: FrameworkQuantizationCapabilities,
                       op_sets_to_layer_add: Dict[str, List[Any]] = None,
                       op_sets_to_layer_drop: Dict[str, List[Any]] = None,
                       attr_mapping: Dict[str, Dict] = {}):
@@ -189,20 +191,20 @@ def generate_test_fqc(name: str,
         # Remove empty op sets
         merged_dict = {op_set_name: layers for op_set_name, layers in merged_dict.items() if len(layers) == 0}
 
-    fqc = tp.FrameworkQuantizationCapabilities(tpc)
+    fqc = FrameworkQuantizationCapabilities(tpc)
 
     with fqc:
         for op_set_name, layers in merged_dict.items():
             am = attr_mapping.get(op_set_name)
-            tp.OperationsSetToLayers(op_set_name, layers, attr_mapping=am)
+            OperationsSetToLayers(op_set_name, layers, attr_mapping=am)
 
     return fqc
 
 
 def generate_test_attr_configs(default_cfg_nbits: int = 8,
-                               default_cfg_quantizatiom_method: tp.QuantizationMethod = tp.QuantizationMethod.POWER_OF_TWO,
+                               default_cfg_quantizatiom_method: QuantizationMethod = QuantizationMethod.POWER_OF_TWO,
                                kernel_cfg_nbits: int = 8,
-                               kernel_cfg_quantizatiom_method: tp.QuantizationMethod = tp.QuantizationMethod.POWER_OF_TWO,
+                               kernel_cfg_quantizatiom_method: QuantizationMethod = QuantizationMethod.POWER_OF_TWO,
                                enable_kernel_weights_quantization: bool = True,
                                kernel_lut_values_bitwidth: int = None):
     default_weight_attr_config = schema.AttributeQuantizationConfig(
@@ -220,7 +222,7 @@ def generate_test_attr_configs(default_cfg_nbits: int = 8,
         lut_values_bitwidth=kernel_lut_values_bitwidth)
 
     bias_config = schema.AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=FLOAT_BITWIDTH,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -236,7 +238,7 @@ def generate_test_op_qc(default_weight_attr_config: schema.AttributeQuantization
                         bias_config: schema.AttributeQuantizationConfig,
                         enable_activation_quantization: bool = True,
                         activation_n_bits: int = 8,
-                        activation_quantization_method: tp.QuantizationMethod = tp.QuantizationMethod.POWER_OF_TWO):
+                        activation_quantization_method: QuantizationMethod = QuantizationMethod.POWER_OF_TWO):
     return schema.OpQuantizationConfig(enable_activation_quantization=enable_activation_quantization,
                                           default_weight_attr_config=default_weight_attr_config,
                                           attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config,
diff --git a/tests/common_tests/helpers/prep_graph_for_func_test.py b/tests/common_tests/helpers/prep_graph_for_func_test.py
index bbb5d76a7..d77158615 100644
--- a/tests/common_tests/helpers/prep_graph_for_func_test.py
+++ b/tests/common_tests/helpers/prep_graph_for_func_test.py
@@ -29,8 +29,6 @@
 
 import model_compression_toolkit as mct
 
-tp = mct.target_platform
-
 
 def prepare_graph_with_configs(in_model,
                                fw_impl,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v1/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v1/tpc.py
index 89f9735f0..3c6e8fcfe 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v1/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v1/tpc.py
@@ -22,7 +22,6 @@
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
-tp = mct.target_platform
 
 
 def get_tpc() -> TargetPlatformCapabilities:
@@ -60,7 +59,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=8,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -69,7 +68,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # define a quantization config to quantize the kernel (for layers where there is a kernel attribute).
     kernel_base_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=8,
         weights_per_channel_threshold=True,
         enable_weights_quantization=True,
@@ -77,7 +76,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # define a quantization config to quantize the bias (for layers where there is a bias attribute).
     bias_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=FLOAT_BITWIDTH,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -92,7 +91,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     eight_bits_default = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -106,7 +105,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     linear_eight_bits = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config, BIAS_ATTR: bias_config},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tpc.py
index 40cd8414e..89aac31ee 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tpc.py
@@ -23,7 +23,6 @@
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
-tp = mct.target_platform
 
 
 def get_tpc() -> TargetPlatformCapabilities:
@@ -56,7 +55,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=8,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -64,7 +63,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define a quantization config to quantize the kernel (for layers where there is a kernel attribute).
     kernel_base_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=8,
         weights_per_channel_threshold=True,
         enable_weights_quantization=True,
@@ -72,7 +71,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define a quantization config to quantize the bias (for layers where there is a bias attribute).
     bias_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=FLOAT_BITWIDTH,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -87,7 +86,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     eight_bits_default = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -101,7 +100,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     linear_eight_bits = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config, BIAS_ATTR: bias_config},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -118,11 +117,11 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     # to quantize the operations' activations using LUT.
     four_bits_lut = linear_eight_bits.clone_and_edit(
         attr_to_edit={KERNEL_ATTR: {WEIGHTS_N_BITS: 4,
-                                    WEIGHTS_QUANTIZATION_METHOD: tp.QuantizationMethod.LUT_SYM_QUANTIZER}},
+                                    WEIGHTS_QUANTIZATION_METHOD: QuantizationMethod.LUT_SYM_QUANTIZER}},
         simd_size=linear_eight_bits.simd_size * 2)
     two_bits_lut = linear_eight_bits.clone_and_edit(
         attr_to_edit={KERNEL_ATTR: {WEIGHTS_N_BITS: 2,
-                                    WEIGHTS_QUANTIZATION_METHOD: tp.QuantizationMethod.LUT_SYM_QUANTIZER}},
+                                    WEIGHTS_QUANTIZATION_METHOD: QuantizationMethod.LUT_SYM_QUANTIZER}},
         simd_size=linear_eight_bits.simd_size * 4)
     mixed_precision_cfg_list = [linear_eight_bits, four_bits_lut, two_bits_lut]
 
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tpc.py
index e16be42b8..18ce2262e 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tpc.py
@@ -23,7 +23,6 @@
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
-tp = mct.target_platform
 
 
 def get_tpc() -> TargetPlatformCapabilities:
@@ -56,7 +55,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=8,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -64,7 +63,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define a quantization config to quantize the kernel (for layers where there is a kernel attribute).
     kernel_base_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=8,
         weights_per_channel_threshold=True,
         enable_weights_quantization=True,
@@ -72,7 +71,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define a quantization config to quantize the bias (for layers where there is a bias attribute).
     bias_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=FLOAT_BITWIDTH,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -87,7 +86,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     eight_bits_default = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -99,7 +98,7 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
 
     # We define an 8-bit config for linear operations quantization, that include a kernel and bias attributes.
     linear_eight_bits = schema.OpQuantizationConfig(
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config, BIAS_ATTR: bias_config},
         activation_n_bits=8,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v2/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v2/tpc.py
index 5fd88882a..f2dcfb4d0 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v2/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v2/tpc.py
@@ -23,7 +23,6 @@
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
-tp = mct.target_platform
 
 
 def get_tpc() -> TargetPlatformCapabilities:
@@ -63,7 +62,7 @@ def get_op_quantization_configs() -> \
 
     # define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=8,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -72,7 +71,7 @@ def get_op_quantization_configs() -> \
 
     # define a quantization config to quantize the kernel (for layers where there is a kernel attribute).
     kernel_base_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=8,
         weights_per_channel_threshold=True,
         enable_weights_quantization=True,
@@ -80,7 +79,7 @@ def get_op_quantization_configs() -> \
 
     # define a quantization config to quantize the bias (for layers where there is a bias attribute).
     bias_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=FLOAT_BITWIDTH,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -95,7 +94,7 @@ def get_op_quantization_configs() -> \
     eight_bits_default = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -109,7 +108,7 @@ def get_op_quantization_configs() -> \
     linear_eight_bits = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config, BIAS_ATTR: bias_config},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tpc.py
index 583781f7f..20d79d9fd 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tpc.py
@@ -23,7 +23,6 @@
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
-tp = mct.target_platform
 
 
 def get_tpc() -> TargetPlatformCapabilities:
@@ -58,7 +57,7 @@ def get_op_quantization_configs() -> \
 
     # We define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=8,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -66,7 +65,7 @@ def get_op_quantization_configs() -> \
 
     # define a quantization config to quantize the kernel (for layers where there is a kernel attribute).
     kernel_base_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=8,
         weights_per_channel_threshold=True,
         enable_weights_quantization=True,
@@ -74,7 +73,7 @@ def get_op_quantization_configs() -> \
 
     # We define a quantization config to quantize the bias (for layers where there is a bias attribute).
     bias_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=FLOAT_BITWIDTH,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -89,7 +88,7 @@ def get_op_quantization_configs() -> \
     eight_bits_default = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -103,7 +102,7 @@ def get_op_quantization_configs() -> \
     linear_eight_bits = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config, BIAS_ATTR: bias_config},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -120,11 +119,11 @@ def get_op_quantization_configs() -> \
     # to quantize the operations' activations using LUT.
     four_bits_lut = linear_eight_bits.clone_and_edit(
         attr_to_edit={KERNEL_ATTR: {WEIGHTS_N_BITS: 4,
-                                    WEIGHTS_QUANTIZATION_METHOD: tp.QuantizationMethod.LUT_SYM_QUANTIZER}},
+                                    WEIGHTS_QUANTIZATION_METHOD: QuantizationMethod.LUT_SYM_QUANTIZER}},
         simd_size=linear_eight_bits.simd_size * 2)
     two_bits_lut = linear_eight_bits.clone_and_edit(
         attr_to_edit={KERNEL_ATTR: {WEIGHTS_N_BITS: 2,
-                                    WEIGHTS_QUANTIZATION_METHOD: tp.QuantizationMethod.LUT_SYM_QUANTIZER}},
+                                    WEIGHTS_QUANTIZATION_METHOD: QuantizationMethod.LUT_SYM_QUANTIZER}},
         simd_size=linear_eight_bits.simd_size * 4)
     mixed_precision_cfg_list = [linear_eight_bits, four_bits_lut, two_bits_lut]
 
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v3/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v3/tpc.py
index 75c15d2b7..f49cbfd94 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v3/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v3/tpc.py
@@ -23,7 +23,6 @@
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
-tp = mct.target_platform
 
 
 def get_tpc() -> TargetPlatformCapabilities:
@@ -63,7 +62,7 @@ def get_op_quantization_configs() -> \
 
     # define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=8,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -72,7 +71,7 @@ def get_op_quantization_configs() -> \
 
     # define a quantization config to quantize the kernel (for layers where there is a kernel attribute).
     kernel_base_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=8,
         weights_per_channel_threshold=True,
         enable_weights_quantization=True,
@@ -80,7 +79,7 @@ def get_op_quantization_configs() -> \
 
     # define a quantization config to quantize the bias (for layers where there is a bias attribute).
     bias_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=FLOAT_BITWIDTH,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -95,7 +94,7 @@ def get_op_quantization_configs() -> \
     eight_bits_default = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -109,7 +108,7 @@ def get_op_quantization_configs() -> \
     linear_eight_bits = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config, BIAS_ATTR: bias_config},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -166,7 +165,7 @@ def generate_tpc(default_config: OpQuantizationConfig,
     const_config = default_config.clone_and_edit(
         default_weight_attr_config=default_config.default_weight_attr_config.clone_and_edit(
             enable_weights_quantization=True, weights_per_channel_threshold=True,
-            weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO))
+            weights_quantization_method=QuantizationMethod.POWER_OF_TWO))
     const_configuration_options = schema.QuantizationConfigOptions(quantization_configurations=tuple([const_config]))
 
     # 16 bits inputs and outputs. Currently, only defined for consts since they are used in operators that
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tpc.py
index 631c82513..2c41ad629 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tpc.py
@@ -23,7 +23,6 @@
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
-tp = mct.target_platform
 
 
 def get_tpc() -> TargetPlatformCapabilities:
@@ -58,7 +57,7 @@ def get_op_quantization_configs() -> \
 
     # We define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=8,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -66,7 +65,7 @@ def get_op_quantization_configs() -> \
 
     # define a quantization config to quantize the kernel (for layers where there is a kernel attribute).
     kernel_base_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=8,
         weights_per_channel_threshold=True,
         enable_weights_quantization=True,
@@ -74,7 +73,7 @@ def get_op_quantization_configs() -> \
 
     # We define a quantization config to quantize the bias (for layers where there is a bias attribute).
     bias_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=FLOAT_BITWIDTH,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -89,7 +88,7 @@ def get_op_quantization_configs() -> \
     eight_bits_default = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -103,7 +102,7 @@ def get_op_quantization_configs() -> \
     linear_eight_bits = schema.OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config, BIAS_ATTR: bias_config},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -120,11 +119,11 @@ def get_op_quantization_configs() -> \
     # to quantize the operations' activations using LUT.
     four_bits_lut = linear_eight_bits.clone_and_edit(
         attr_to_edit={KERNEL_ATTR: {WEIGHTS_N_BITS: 4,
-                                    WEIGHTS_QUANTIZATION_METHOD: tp.QuantizationMethod.LUT_SYM_QUANTIZER}},
+                                    WEIGHTS_QUANTIZATION_METHOD: QuantizationMethod.LUT_SYM_QUANTIZER}},
         simd_size=linear_eight_bits.simd_size * 2)
     two_bits_lut = linear_eight_bits.clone_and_edit(
         attr_to_edit={KERNEL_ATTR: {WEIGHTS_N_BITS: 2,
-                                    WEIGHTS_QUANTIZATION_METHOD: tp.QuantizationMethod.LUT_SYM_QUANTIZER}},
+                                    WEIGHTS_QUANTIZATION_METHOD: QuantizationMethod.LUT_SYM_QUANTIZER}},
         simd_size=linear_eight_bits.simd_size * 4)
     mixed_precision_cfg_list = [linear_eight_bits, four_bits_lut, two_bits_lut]
 
@@ -164,7 +163,7 @@ def generate_tpc(default_config: OpQuantizationConfig,
     const_config = default_config.clone_and_edit(
         default_weight_attr_config=default_config.default_weight_attr_config.clone_and_edit(
             enable_weights_quantization=True, weights_per_channel_threshold=True,
-            weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO))
+            weights_quantization_method=QuantizationMethod.POWER_OF_TWO))
     const_configuration_options = schema.QuantizationConfigOptions(quantization_configurations=tuple([const_config]))
 
     # 16 bits inputs and outputs. Currently, only defined for consts since they are used in operators that
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v4/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v4/tpc.py
index 9c410c503..5d30ce04b 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v4/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v4/tpc.py
@@ -23,7 +23,6 @@
     Signedness, \
     AttributeQuantizationConfig, OpQuantizationConfig
 
-tp = mct.target_platform
 
 def get_tpc() -> TargetPlatformCapabilities:
     """
@@ -62,7 +61,7 @@ def get_op_quantization_configs() -> \
 
     # define a default quantization config for all non-specified weights attributes.
     default_weight_attr_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=8,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -71,7 +70,7 @@ def get_op_quantization_configs() -> \
 
     # define a quantization config to quantize the kernel (for layers where there is a kernel attribute).
     kernel_base_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.SYMMETRIC,
+        weights_quantization_method=QuantizationMethod.SYMMETRIC,
         weights_n_bits=8,
         weights_per_channel_threshold=True,
         enable_weights_quantization=True,
@@ -79,7 +78,7 @@ def get_op_quantization_configs() -> \
 
     # define a quantization config to quantize the bias (for layers where there is a bias attribute).
     bias_config = AttributeQuantizationConfig(
-        weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
         weights_n_bits=FLOAT_BITWIDTH,
         weights_per_channel_threshold=False,
         enable_weights_quantization=False,
@@ -94,7 +93,7 @@ def get_op_quantization_configs() -> \
     eight_bits_default = OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -108,7 +107,7 @@ def get_op_quantization_configs() -> \
     linear_eight_bits = OpQuantizationConfig(
         default_weight_attr_config=default_weight_attr_config,
         attr_weights_configs_mapping={KERNEL_ATTR: kernel_base_config, BIAS_ATTR: bias_config},
-        activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
         activation_n_bits=8,
         supported_input_activation_n_bits=8,
         enable_activation_quantization=True,
@@ -173,7 +172,7 @@ def generate_tpc(default_config: OpQuantizationConfig,
     const_config = default_config.clone_and_edit(
         default_weight_attr_config=default_config.default_weight_attr_config.clone_and_edit(
             enable_weights_quantization=True, weights_per_channel_threshold=True,
-            weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO))
+            weights_quantization_method=QuantizationMethod.POWER_OF_TWO))
     const_configuration_options = schema.QuantizationConfigOptions(quantization_configurations=tuple([const_config]))
 
     # 16 bits inputs and outputs. Currently, only defined for consts since they are used in operators that
@@ -191,7 +190,7 @@ def generate_tpc(default_config: OpQuantizationConfig,
         supported_input_activation_n_bits=(8, 16),
         default_weight_attr_config=default_config.default_weight_attr_config.clone_and_edit(
             enable_weights_quantization=True, weights_per_channel_threshold=False,
-            weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO)
+            weights_quantization_method=QuantizationMethod.POWER_OF_TWO)
     )
     const_config_input16_output16_per_tensor = const_config_input16_per_tensor.clone_and_edit(
         activation_n_bits=16, signedness=Signedness.SIGNED)
diff --git a/tests/common_tests/test_tpc.py b/tests/common_tests/test_tpc.py
index 9802152a1..e4a5645d1 100644
--- a/tests/common_tests/test_tpc.py
+++ b/tests/common_tests/test_tpc.py
@@ -26,7 +26,6 @@
     export_target_platform_model
 from tests.common_tests.helpers.generate_test_tpc import generate_test_attr_configs, generate_test_op_qc
 
-tp = mct.target_platform
 
 TEST_QC = generate_test_op_qc(**generate_test_attr_configs())
 TEST_QCO = schema.QuantizationConfigOptions(quantization_configurations=tuple([TEST_QC]))
diff --git a/tests/external_tests/keras_tests/models_tests/test_networks_runner.py b/tests/external_tests/keras_tests/models_tests/test_networks_runner.py
index fcfbe836b..f542d3eab 100644
--- a/tests/external_tests/keras_tests/models_tests/test_networks_runner.py
+++ b/tests/external_tests/keras_tests/models_tests/test_networks_runner.py
@@ -31,7 +31,6 @@
 
 keras = tf.keras
 layers = keras.layers
-tp = mct.target_platform
 
 QUANTIZATION_CONFIG = mct.core.QuantizationConfig(activation_error_method=mct.core.QuantizationErrorMethod.MSE,
                                                   weights_error_method=mct.core.QuantizationErrorMethod.MSE,
diff --git a/tests/keras_tests/exporter_tests/tflite_int8/imx500_int8_tpc.py b/tests/keras_tests/exporter_tests/tflite_int8/imx500_int8_tpc.py
index bdac7a0c0..3b09d9712 100644
--- a/tests/keras_tests/exporter_tests/tflite_int8/imx500_int8_tpc.py
+++ b/tests/keras_tests/exporter_tests/tflite_int8/imx500_int8_tpc.py
@@ -36,7 +36,6 @@
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, OpQuantizationConfig
 from tests.common_tests.helpers.tpcs_for_tests.v1.tpc import generate_tpc
 
-tp = mct.target_platform
 
 
 def get_tpc(edit_weights_params_dict, edit_act_params_dict) -> TargetPlatformCapabilities:
@@ -63,16 +62,16 @@ def get_op_quantization_configs() -> Tuple[OpQuantizationConfig, List[OpQuantiza
     return eight_bits, mixed_precision_cfg_list, default_config
 
 
-def get_int8_tpc(edit_weights_params_dict={}, edit_act_params_dict={}) -> tp.TargetPlatformCapabilities:
+def get_int8_tpc(edit_weights_params_dict={}, edit_act_params_dict={}) -> TargetPlatformCapabilities:
     default_tpc = get_tpc(edit_weights_params_dict, edit_act_params_dict)
     return default_tpc
 
 
 def generate_keras_tpc(name: str, tpc: schema.TargetPlatformCapabilities):
-    keras_tpc = tp.FrameworkQuantizationCapabilities(tpc)
+    keras_tpc = FrameworkQuantizationCapabilities(tpc)
 
     with keras_tpc:
-        tp.OperationsSetToLayers("NoQuantization", [Reshape,
+        OperationsSetToLayers("NoQuantization", [Reshape,
                                                     tf.reshape,
                                                     Permute,
                                                     tf.transpose,
@@ -92,7 +91,7 @@ def generate_keras_tpc(name: str, tpc: schema.TargetPlatformCapabilities):
                                                     tf.nn.top_k,
                                                     tf.__operators__.getitem,
                                                     tf.compat.v1.shape])
-        tp.OperationsSetToLayers("Conv",
+        OperationsSetToLayers("Conv",
                                  [Conv2D,
                                   DepthwiseConv2D,
                                   Conv2DTranspose,
@@ -104,22 +103,22 @@ def generate_keras_tpc(name: str, tpc: schema.TargetPlatformCapabilities):
                                          DepthwiseConv2D: KERAS_DEPTHWISE_KERNEL,
                                          tf.nn.depthwise_conv2d: KERAS_DEPTHWISE_KERNEL}, default_value=KERAS_KERNEL),
                                      BIAS_ATTR: DefaultDict(default_value=BIAS)})
-        tp.OperationsSetToLayers("FullyConnected", [Dense],
-                                 attr_mapping={KERNEL_ATTR: DefaultDict(default_value=KERAS_KERNEL),
-                                               BIAS_ATTR: DefaultDict(default_value=BIAS)})
-        tp.OperationsSetToLayers("AnyReLU", [tf.nn.relu,
-                                             tf.nn.relu6,
-                                             tf.nn.leaky_relu,
-                                             ReLU,
-                                             LeakyReLU,
-                                             tp.LayerFilterParams(Activation, activation="relu"),
-                                             tp.LayerFilterParams(Activation, activation="leaky_relu")])
-        tp.OperationsSetToLayers("Add", [tf.add, Add])
-        tp.OperationsSetToLayers("Sub", [tf.subtract, Subtract])
-        tp.OperationsSetToLayers("Mul", [tf.math.multiply, Multiply])
-        tp.OperationsSetToLayers("Div", [tf.math.divide])
-        tp.OperationsSetToLayers("PReLU", [PReLU])
-        tp.OperationsSetToLayers("Swish", [tf.nn.swish, tp.LayerFilterParams(Activation, activation="swish")])
-        tp.OperationsSetToLayers("Sigmoid", [tf.nn.sigmoid, tp.LayerFilterParams(Activation, activation="sigmoid")])
-        tp.OperationsSetToLayers("Tanh", [tf.nn.tanh, tp.LayerFilterParams(Activation, activation="tanh")])
+        OperationsSetToLayers("FullyConnected", [Dense],
+                              attr_mapping={KERNEL_ATTR: DefaultDict(default_value=KERAS_KERNEL),
+                                            BIAS_ATTR: DefaultDict(default_value=BIAS)})
+        OperationsSetToLayers("AnyReLU", [tf.nn.relu,
+                                          tf.nn.relu6,
+                                          tf.nn.leaky_relu,
+                                          ReLU,
+                                          LeakyReLU,
+                                          LayerFilterParams(Activation, activation="relu"),
+                                          LayerFilterParams(Activation, activation="leaky_relu")])
+        OperationsSetToLayers("Add", [tf.add, Add])
+        OperationsSetToLayers("Sub", [tf.subtract, Subtract])
+        OperationsSetToLayers("Mul", [tf.math.multiply, Multiply])
+        OperationsSetToLayers("Div", [tf.math.divide])
+        OperationsSetToLayers("PReLU", [PReLU])
+        OperationsSetToLayers("Swish", [tf.nn.swish, LayerFilterParams(Activation, activation="swish")])
+        OperationsSetToLayers("Sigmoid", [tf.nn.sigmoid, LayerFilterParams(Activation, activation="sigmoid")])
+        OperationsSetToLayers("Tanh", [tf.nn.tanh, LayerFilterParams(Activation, activation="tanh")])
     return keras_tpc
diff --git a/tests/keras_tests/exporter_tests/tflite_int8/networks/conv2d_test.py b/tests/keras_tests/exporter_tests/tflite_int8/networks/conv2d_test.py
index 918314119..882552bc8 100644
--- a/tests/keras_tests/exporter_tests/tflite_int8/networks/conv2d_test.py
+++ b/tests/keras_tests/exporter_tests/tflite_int8/networks/conv2d_test.py
@@ -16,8 +16,8 @@
 import numpy as np
 
 import tests.keras_tests.exporter_tests.constants as constants
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.core.keras.constants import KERNEL
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from tests.keras_tests.exporter_tests.tflite_int8.imx500_int8_tpc import get_int8_tpc
 from tests.keras_tests.exporter_tests.tflite_int8.tflite_int8_exporter_base_test import TFLiteINT8ExporterBaseTest
 from tests.keras_tests.utils import get_layers_from_model_by_type
diff --git a/tests/keras_tests/exporter_tests/tflite_int8/networks/mobilenetv2_test.py b/tests/keras_tests/exporter_tests/tflite_int8/networks/mobilenetv2_test.py
index c425ce3f9..f4ce5fd27 100644
--- a/tests/keras_tests/exporter_tests/tflite_int8/networks/mobilenetv2_test.py
+++ b/tests/keras_tests/exporter_tests/tflite_int8/networks/mobilenetv2_test.py
@@ -18,7 +18,7 @@
 from keras.applications import MobileNetV2
 
 import tests.keras_tests.exporter_tests.constants as constants
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from tests.keras_tests.exporter_tests.tflite_int8.imx500_int8_tpc import get_int8_tpc
 from tests.keras_tests.exporter_tests.tflite_int8.tflite_int8_exporter_base_test import TFLiteINT8ExporterBaseTest
 
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/bn_attributes_quantization_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/bn_attributes_quantization_test.py
index f342b1a49..ff161d818 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/bn_attributes_quantization_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/bn_attributes_quantization_test.py
@@ -30,8 +30,6 @@
 
 keras = tf.keras
 layers = keras.layers
-tp = mct.target_platform
-
 
 def _generate_bn_quantized_tpm(quantize_linear):
     attr_cfgs_dict = generate_test_attr_configs()
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/bn_folding_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/bn_folding_test.py
index 616a2d4ea..7f71c7590 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/bn_folding_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/bn_folding_test.py
@@ -28,7 +28,6 @@
 
 keras = tf.keras
 layers = keras.layers
-tp = mct.target_platform
 
 
 def update_kernel_for_bn_folding_fn(conv_layer: layers.Conv2D,
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/const_quantization_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/const_quantization_test.py
index d63d97350..24e9e85e9 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/const_quantization_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/const_quantization_test.py
@@ -32,7 +32,6 @@
 
 keras = tf.keras
 layers = keras.layers
-tp = mct.target_platform
 
 
 def create_const_quant_tpc(qmethod):
@@ -73,7 +72,7 @@ class ConstQuantizationTest(BaseKerasFeatureNetworkTest):
 
     def __init__(self, unit_test, layer, const, is_list_input=False, input_reverse_order=False, use_kwargs=False,
                  error_method: mct.core.QuantizationErrorMethod = mct.core.QuantizationErrorMethod.MSE,
-                 qmethod: tp.QuantizationMethod = tp.QuantizationMethod.POWER_OF_TWO,
+                 qmethod: QuantizationMethod = QuantizationMethod.POWER_OF_TWO,
                  input_shape=(32, 32, 16)):
         super(ConstQuantizationTest, self).__init__(unit_test=unit_test, input_shape=input_shape)
         self.layer = layer
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/const_representation_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/const_representation_test.py
index 7d662f3ed..fedd80908 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/const_representation_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/const_representation_test.py
@@ -26,7 +26,6 @@
 
 keras = tf.keras
 layers = keras.layers
-tp = mct.target_platform
 
 
 class ConstRepresentationTest(BaseKerasFeatureNetworkTest):
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/gptq/gptq_conv.py b/tests/keras_tests/feature_networks_tests/feature_networks/gptq/gptq_conv.py
index 3367bc7a7..469a71bd5 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/gptq/gptq_conv.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/gptq/gptq_conv.py
@@ -21,7 +21,6 @@
 
 keras = tf.keras
 layers = keras.layers
-tp = mct.target_platform
 
 
 def build_model(in_input_shape: List[int], group: int = 1, dilation_rate=(1, 1)) -> keras.Model:
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/gptq/gptq_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/gptq/gptq_test.py
index 487297ced..e4e16feaf 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/gptq/gptq_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/gptq/gptq_test.py
@@ -18,11 +18,11 @@
 import tensorflow as tf
 
 import model_compression_toolkit as mct
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit import DefaultDict
 from model_compression_toolkit.constants import GPTQ_HESSIAN_NUM_SAMPLES
 from model_compression_toolkit.gptq.common.gptq_config import GradientPTQConfig, RoundingType, GradientPTQConfig, \
     GPTQHessianScoresConfig, GradualActivationQuantizationConfig
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.core.common.user_info import UserInformation
 from model_compression_toolkit.gptq.common.gptq_constants import QUANT_PARAM_LEARNING_STR, MAX_LSB_STR
 from model_compression_toolkit.gptq.keras.gptq_loss import multiple_tensors_mse_loss
@@ -32,7 +32,6 @@
 
 keras = tf.keras
 layers = keras.layers
-tp = mct.target_platform
 
 
 def build_model(in_input_shape: List[int]) -> keras.Model:
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/linear_collapsing_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/linear_collapsing_test.py
index 70d5e18ae..636d7502a 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/linear_collapsing_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/linear_collapsing_test.py
@@ -31,7 +31,6 @@
 
 keras = tf.keras
 layers = keras.layers
-tp = mct.target_platform
 
 
 class BaseConv2DCollapsingTest(BaseKerasFeatureNetworkTest, ABC):
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/lut_quantizer.py b/tests/keras_tests/feature_networks_tests/feature_networks/lut_quantizer.py
index 03c27db77..b3d24daa3 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/lut_quantizer.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/lut_quantizer.py
@@ -34,7 +34,6 @@
 
 keras = tf.keras
 layers = keras.layers
-tp = mct.target_platform
 
 
 def get_uniform_weights(kernel, in_channels, out_channels):
@@ -59,7 +58,7 @@ def __init__(self, unit_test, weights_n_bits: int = 3, is_symmetric=False):
         super().__init__(unit_test, num_calibration_iter=5, val_batch_size=32)
 
     def get_tpc(self):
-        qmethod = tp.QuantizationMethod.LUT_SYM_QUANTIZER if self.is_symmetric else tp.QuantizationMethod.LUT_POT_QUANTIZER
+        qmethod = QuantizationMethod.LUT_SYM_QUANTIZER if self.is_symmetric else QuantizationMethod.LUT_POT_QUANTIZER
         tpc = generate_test_tpc({'weights_n_bits': self.weights_n_bits,
                                            'weights_quantization_method': qmethod})
         return generate_keras_tpc(name='lut_quantizer_test', tpc=tpc)
@@ -69,7 +68,7 @@ def get_debug_config(self):
             network_editor=[EditRule(filter=NodeNameFilter(self.node_to_change_name),
                                      action=ChangeCandidatesWeightsQuantizationMethod(
                                          weights_quantization_method=
-                                         mct.target_platform.QuantizationMethod.POWER_OF_TWO,
+                                         mct.QuantizationMethod.POWER_OF_TWO,
                                          attr_name=KERNEL))])
 
     def get_input_shapes(self):
@@ -105,7 +104,7 @@ def __init__(self, unit_test, activation_n_bits: int = 3):
         super().__init__(unit_test, num_calibration_iter=5, val_batch_size=32)
 
     def get_tpc(self):
-        tpc = generate_test_tpc({'activation_quantization_method': tp.QuantizationMethod.LUT_POT_QUANTIZER,
+        tpc = generate_test_tpc({'activation_quantization_method': QuantizationMethod.LUT_POT_QUANTIZER,
                                            'activation_n_bits': self.activation_n_bits})
         return generate_keras_tpc(name='lut_quantizer_test', tpc=tpc)
 
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/metadata_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/metadata_test.py
index 5089c1d97..9c0d01b0c 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/metadata_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/metadata_test.py
@@ -26,7 +26,6 @@
 
 keras = tf.keras
 layers = keras.layers
-tp = mct.target_platform
 
 
 class MetadataTest(BaseKerasFeatureNetworkTest):
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision/requires_mixed_precision_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision/requires_mixed_precision_test.py
index 75e9c5516..f9062ba12 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision/requires_mixed_precision_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision/requires_mixed_precision_test.py
@@ -19,7 +19,7 @@
 from packaging import version
 
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
 
 if version.parse(tf.__version__) >= version.parse("2.13"):
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision_tests.py b/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision_tests.py
index cb9402468..15d2f71f6 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision_tests.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision_tests.py
@@ -39,7 +39,6 @@
 
 keras = tf.keras
 layers = keras.layers
-tp = mct.target_platform
 
 
 def get_base_mp_nbits_candidates():
@@ -567,7 +566,7 @@ def get_resource_utilization(self):
     def get_core_config(self):
         return CoreConfig(quantization_config=QuantizationConfig(
             custom_tpc_opset_to_layer={"Softmax": CustomOpsetLayers([layers.Softmax, tf.nn.softmax, softmax,
-                                                    tp.LayerFilterParams(layers.Activation, activation=SOFTMAX)]),
+                                                    LayerFilterParams(layers.Activation, activation=SOFTMAX)]),
                                        "Input": CustomOpsetLayers([layers.InputLayer])}))
 
     def get_tpc(self):
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/edit_qc_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/edit_qc_test.py
index a3dfbf5f0..78fd3446c 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/edit_qc_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/edit_qc_test.py
@@ -16,6 +16,7 @@
 import tensorflow as tf
 from tqdm import tqdm
 
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.core import DebugConfig
 from model_compression_toolkit.core.common.mixed_precision.bit_width_setter import set_bit_widths
 from model_compression_toolkit.core.common.mixed_precision.mixed_precision_search_facade import search_bit_width
@@ -34,10 +35,9 @@
 from model_compression_toolkit.core.common.substitutions.apply_substitutions import substitute
 from model_compression_toolkit.core.graph_prep_runner import graph_preparation_runner
 from model_compression_toolkit.core.keras.constants import KERNEL
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 
 
 keras = tf.keras
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/node_filter_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/node_filter_test.py
index 4e11c29d0..afcd50a06 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/node_filter_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/node_filter_test.py
@@ -31,7 +31,6 @@
 
 keras = tf.keras
 layers = keras.layers
-tp = mct.target_platform
 
 
 def get_uniform_weights(kernel, in_channels, out_channels):
@@ -58,7 +57,7 @@ def __init__(self, unit_test, activation_n_bits: int = 3, weights_n_bits: int =
 
     def get_tpc(self):
         tpc = generate_test_tpc({
-            'weights_quantization_method': tp.QuantizationMethod.POWER_OF_TWO,
+            'weights_quantization_method': QuantizationMethod.POWER_OF_TWO,
             'activation_n_bits': 16,
             'weights_n_bits': 16})
         return generate_keras_tpc(name="scope_filter_test", tpc=tpc)
@@ -140,7 +139,7 @@ def __init__(self, unit_test, activation_n_bits: int = 3, weights_n_bits: int =
 
     def get_tpc(self):
         tpc = generate_test_tpc({
-            'weights_quantization_method': tp.QuantizationMethod.POWER_OF_TWO,
+            'weights_quantization_method': QuantizationMethod.POWER_OF_TWO,
             'activation_n_bits': 16,
             'weights_n_bits': 16})
         return generate_keras_tpc(name="name_filter_test", tpc=tpc)
@@ -205,14 +204,14 @@ def __init__(self, unit_test, activation_n_bits: int = 3, weights_n_bits: int =
         super().__init__(unit_test )
 
     def weights_params_fn(self):
-        return get_weights_quantization_params_fn(tp.QuantizationMethod.POWER_OF_TWO)
+        return get_weights_quantization_params_fn(QuantizationMethod.POWER_OF_TWO)
 
     def activations_params_fn(self):
-        return get_activation_quantization_params_fn(tp.QuantizationMethod.POWER_OF_TWO)
+        return get_activation_quantization_params_fn(QuantizationMethod.POWER_OF_TWO)
 
     def get_tpc(self):
         tpc = generate_test_tpc({
-            'weights_quantization_method': tp.QuantizationMethod.POWER_OF_TWO,
+            'weights_quantization_method': QuantizationMethod.POWER_OF_TWO,
             'activation_n_bits': 16,
             'weights_n_bits': 16})
         return generate_keras_tpc(name="type_filter_test", tpc=tpc)
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/qat/qat_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/qat/qat_test.py
index fd27723ae..6fd65e2f9 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/qat/qat_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/qat/qat_test.py
@@ -45,8 +45,8 @@
 
 class QuantizationAwareTrainingTest(BaseKerasFeatureNetworkTest):
     def __init__(self, unit_test, layer, weight_bits=2, activation_bits=4, finalize=False,
-                 weights_quantization_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO,
-                 activation_quantization_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO,
+                 weights_quantization_method=mct.QuantizationMethod.POWER_OF_TWO,
+                 activation_quantization_method=mct.QuantizationMethod.POWER_OF_TWO,
                  test_loading=False):
         self.layer = layer
         self.weight_bits = weight_bits
@@ -163,8 +163,8 @@ def compare(self, quantized_model, float_model, loaded_model, input_x=None, quan
 
 class QATWrappersTest(BaseKerasFeatureNetworkTest):
     def __init__(self, unit_test, layer, weight_bits=2, activation_bits=4, finalize=True,
-                 weights_quantization_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO,
-                 activation_quantization_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO,
+                 weights_quantization_method=mct.QuantizationMethod.POWER_OF_TWO,
+                 activation_quantization_method=mct.QuantizationMethod.POWER_OF_TWO,
                  training_method=TrainingMethod.STE,
                  per_channel=True,
                  test_loading=False):
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/residual_collapsing_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/residual_collapsing_test.py
index 150511b39..26325f5c6 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/residual_collapsing_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/residual_collapsing_test.py
@@ -22,7 +22,6 @@
 
 keras = tf.keras
 layers = keras.layers
-tp = mct.target_platform
 
 
 class BaseResidualCollapsingTest(BaseKerasFeatureNetworkTest):
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/second_moment_correction_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/second_moment_correction_test.py
index 44fb8de79..f5c47fb7e 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/second_moment_correction_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/second_moment_correction_test.py
@@ -20,6 +20,7 @@
 import tensorflow as tf
 
 import model_compression_toolkit as mct
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit import get_target_platform_capabilities
 from model_compression_toolkit.constants import TENSORFLOW
 from model_compression_toolkit.core import CoreConfig, QuantizationConfig, DEFAULTCONFIG, FrameworkInfo, DebugConfig
@@ -37,9 +38,7 @@
 from model_compression_toolkit.core.runner import core_runner
 from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
-from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
@@ -51,7 +50,6 @@
 
 keras = tf.keras
 layers = keras.layers
-tp = mct.target_platform
 
 
 class BaseSecondMomentTest(BaseKerasFeatureNetworkTest, ABC):
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/symmetric_threshold_selection_activation_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/symmetric_threshold_selection_activation_test.py
index 93e1940eb..0801c6158 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/symmetric_threshold_selection_activation_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/symmetric_threshold_selection_activation_test.py
@@ -24,7 +24,6 @@
 import model_compression_toolkit as mct
 from tests.keras_tests.utils import get_layers_from_model_by_type
 
-tp = mct.target_platform
 keras = tf.keras
 layers = keras.layers
 
@@ -39,7 +38,7 @@ def generate_inputs(self):
 
     def get_tpc(self):
         tpc = generate_test_tpc({
-            'activation_quantization_method': tp.QuantizationMethod.SYMMETRIC,
+            'activation_quantization_method': QuantizationMethod.SYMMETRIC,
             'activation_n_bits': 8})
         return generate_keras_tpc(name="symmetric_threshold_test", tpc=tpc)
 
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/test_kmeans_quantizer.py b/tests/keras_tests/feature_networks_tests/feature_networks/test_kmeans_quantizer.py
index c96d414dc..9af69bbf4 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/test_kmeans_quantizer.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/test_kmeans_quantizer.py
@@ -15,7 +15,7 @@
 
 import unittest
 
-from model_compression_toolkit import target_platform
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.core.common.network_editors.node_filters import NodeNameFilter
 from model_compression_toolkit.core.common.network_editors.actions import EditRule, ChangeCandidatesWeightsQuantConfigAttr
 from model_compression_toolkit.core.common.quantization.quantizers.uniform_quantizers import power_of_two_quantizer
@@ -53,7 +53,7 @@ class KmeansQuantizerTestBase(BaseKerasFeatureNetworkTest):
 
     def __init__(self,
                  unit_test,
-                 quantization_method: target_platform.QuantizationMethod.LUT_POT_QUANTIZER,
+                 quantization_method: QuantizationMethod.LUT_POT_QUANTIZER,
                  weight_fn=get_uniform_weights,
                  weights_n_bits: int = 3):
 
@@ -94,7 +94,7 @@ def get_debug_config(self):
         return mct.core.DebugConfig(network_editor=[EditRule(filter=NodeNameFilter(self.node_to_change_name),
                                                              action=ChangeCandidatesWeightsQuantConfigAttr(
                                                                  attr_name=KERNEL,
-                                                                 weights_quantization_method=target_platform.QuantizationMethod.POWER_OF_TWO)),
+                                                                 weights_quantization_method=QuantizationMethod.POWER_OF_TWO)),
                                                     EditRule(filter=NodeNameFilter(self.node_to_change_name),
                                                              action=ChangeCandidatesWeightsQuantConfigAttr(
                                                                  attr_name=KERNEL,
@@ -120,7 +120,7 @@ class KmeansQuantizerTest(KmeansQuantizerTestBase):
 
     def __init__(self,
                  unit_test,
-                 quantization_method: target_platform.QuantizationMethod.LUT_POT_QUANTIZER,
+                 quantization_method: QuantizationMethod.LUT_POT_QUANTIZER,
                  weights_n_bits: int = 3):
         super().__init__(unit_test, quantization_method, get_uniform_weights, weights_n_bits)
 
@@ -141,7 +141,7 @@ class KmeansQuantizerNotPerChannelTest(KmeansQuantizerTestBase):
 
     def __init__(self,
                  unit_test,
-                 quantization_method: target_platform.QuantizationMethod.LUT_POT_QUANTIZER,
+                 quantization_method: QuantizationMethod.LUT_POT_QUANTIZER,
                  weights_n_bits: int = 3):
         super().__init__(unit_test, quantization_method, get_uniform_weights, weights_n_bits)
 
@@ -163,7 +163,7 @@ class KmeansQuantizerTestManyClasses(KmeansQuantizerTestBase):
     This test checks the chosen quantization method is different that symmetric uniform
     '''
 
-    def __init__(self, unit_test, quantization_method: target_platform.QuantizationMethod.LUT_POT_QUANTIZER, weights_n_bits: int = 8):
+    def __init__(self, unit_test, quantization_method: QuantizationMethod.LUT_POT_QUANTIZER, weights_n_bits: int = 8):
         super().__init__(unit_test, quantization_method, get_uniform_weights, weights_n_bits)
 
     def compare(self, quantized_model, float_model, input_x=None, quantization_info=None):
@@ -180,7 +180,7 @@ class KmeansQuantizerTestZeroWeights(KmeansQuantizerTestBase):
     '''
 
     def __init__(self, unit_test,
-                 quantization_method: target_platform.QuantizationMethod.LUT_POT_QUANTIZER,
+                 quantization_method: QuantizationMethod.LUT_POT_QUANTIZER,
                  weights_n_bits: int = 3):
         super().__init__(unit_test, quantization_method, get_zero_as_weights, weights_n_bits)
 
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/tpc_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/tpc_test.py
index 4904890f9..cadfbf076 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/tpc_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/tpc_test.py
@@ -22,7 +22,6 @@
 
 keras = tf.keras
 layers = keras.layers
-tp = mct.target_platform
 
 
 class TpcTest(BaseKerasFeatureNetworkTest):
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/uniform_range_selection_activation_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/uniform_range_selection_activation_test.py
index 9b54fc546..cc43072d0 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/uniform_range_selection_activation_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/uniform_range_selection_activation_test.py
@@ -27,7 +27,6 @@
 
 keras = tf.keras
 layers = keras.layers
-tp = mct.target_platform
 
 
 class UniformRangeSelectionActivationTest(BaseKerasFeatureNetworkTest):
@@ -43,7 +42,7 @@ def get_quantization_config(self):
 
     def get_tpc(self):
         tpc = generate_test_tpc({
-            'activation_quantization_method': tp.QuantizationMethod.UNIFORM,
+            'activation_quantization_method': QuantizationMethod.UNIFORM,
             'activation_n_bits': 8})
         return generate_keras_tpc(name="uniform_range_test", tpc=tpc)
 
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py b/tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py
index 18f7113db..303972294 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/weights_mixed_precision_tests.py
@@ -39,7 +39,6 @@
 
 keras = tf.keras
 layers = keras.layers
-tp = mct.target_platform
 
 
 class MixedPrecisionBaseTest(BaseKerasFeatureNetworkTest):
diff --git a/tests/keras_tests/feature_networks_tests/test_features_runner.py b/tests/keras_tests/feature_networks_tests/test_features_runner.py
index e34525ac1..3d9988897 100644
--- a/tests/keras_tests/feature_networks_tests/test_features_runner.py
+++ b/tests/keras_tests/feature_networks_tests/test_features_runner.py
@@ -22,11 +22,11 @@
 from sklearn.metrics.pairwise import distance_metrics
 from tensorflow.keras.layers import PReLU, ELU
 
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.core import QuantizationErrorMethod
 from model_compression_toolkit.core.common.mixed_precision.distance_weighting import MpDistanceWeighting
 from model_compression_toolkit.core.common.network_editors import NodeTypeFilter, NodeNameFilter
 from model_compression_toolkit.gptq.keras.gptq_loss import sample_layer_attention_loss
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.gptq import RoundingType
 from model_compression_toolkit.target_platform_capabilities import constants as C
 from tests.keras_tests.feature_networks_tests.feature_networks.activation_bias_correction_test import \
diff --git a/tests/keras_tests/function_tests/test_activation_quantization_holder_gptq.py b/tests/keras_tests/function_tests/test_activation_quantization_holder_gptq.py
index 8385b4cf1..aae53f6c4 100644
--- a/tests/keras_tests/function_tests/test_activation_quantization_holder_gptq.py
+++ b/tests/keras_tests/function_tests/test_activation_quantization_holder_gptq.py
@@ -11,12 +11,11 @@
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
 from model_compression_toolkit.gptq.keras.gptq_keras_implementation import GPTQKerasImplemantation
 from model_compression_toolkit.gptq.keras.gptq_training import KerasGPTQTrainer
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.common_tests.helpers.prep_graph_for_func_test import prepare_graph_with_quantization_parameters
 
-tp = mct.target_platform
 
 
 def basic_model(input_shape):
diff --git a/tests/keras_tests/function_tests/test_activation_weights_composition_substitution.py b/tests/keras_tests/function_tests/test_activation_weights_composition_substitution.py
index bc6fd3c4a..b3a729a34 100644
--- a/tests/keras_tests/function_tests/test_activation_weights_composition_substitution.py
+++ b/tests/keras_tests/function_tests/test_activation_weights_composition_substitution.py
@@ -21,7 +21,7 @@
 import tensorflow as tf
 
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
 from tests.common_tests.helpers.generate_test_tpc import generate_test_op_qc, generate_test_attr_configs
 
@@ -50,7 +50,6 @@
 import model_compression_toolkit as mct
 from tests.keras_tests.tpc_keras import get_tpc_with_activation_mp_keras
 
-tp = mct.target_platform
 
 INPUT_SHAPE = (8, 8, 3)
 
diff --git a/tests/keras_tests/function_tests/test_cfg_candidates_filter.py b/tests/keras_tests/function_tests/test_cfg_candidates_filter.py
index a01269ddf..272b0683e 100644
--- a/tests/keras_tests/function_tests/test_cfg_candidates_filter.py
+++ b/tests/keras_tests/function_tests/test_cfg_candidates_filter.py
@@ -27,12 +27,11 @@
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
 from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
 from model_compression_toolkit.core.common.fusion.layer_fusing import fusion
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
 from tests.common_tests.helpers.generate_test_tpc import generate_test_attr_configs, generate_test_op_qc
 from tests.keras_tests.tpc_keras import get_tpc_with_activation_mp_keras
 
-tp = mct.target_platform
 
 
 def get_full_bitwidth_candidates():
diff --git a/tests/keras_tests/function_tests/test_custom_layer.py b/tests/keras_tests/function_tests/test_custom_layer.py
index 49287ebd0..420d558ee 100644
--- a/tests/keras_tests/function_tests/test_custom_layer.py
+++ b/tests/keras_tests/function_tests/test_custom_layer.py
@@ -22,8 +22,8 @@
 from model_compression_toolkit.core import CoreConfig, QuantizationConfig
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import Signedness
 from model_compression_toolkit.target_platform_capabilities.constants import BIAS_ATTR, KERNEL_ATTR
-from model_compression_toolkit.target_platform_capabilities.target_platform import LayerFilterParams
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams
 from tests.common_tests.helpers.generate_test_tpc import generate_test_attr_configs, DEFAULT_WEIGHT_ATTR_CONFIG, \
     KERNEL_BASE_CONFIG, BIAS_CONFIG
 
@@ -65,9 +65,8 @@ def get_tpc():
     Returns:
          FrameworkQuantizationCapabilities object
     """
-    tp = mct.target_platform
     attr_cfg = generate_test_attr_configs(kernel_lut_values_bitwidth=0)
-    base_cfg = schema.OpQuantizationConfig(activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+    base_cfg = schema.OpQuantizationConfig(activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
                                            enable_activation_quantization=True,
                                            activation_n_bits=32,
                                            supported_input_activation_n_bits=32,
diff --git a/tests/keras_tests/function_tests/test_get_gptq_config.py b/tests/keras_tests/function_tests/test_get_gptq_config.py
index fb3515050..a87f30c5a 100644
--- a/tests/keras_tests/function_tests/test_get_gptq_config.py
+++ b/tests/keras_tests/function_tests/test_get_gptq_config.py
@@ -18,13 +18,13 @@
 import numpy as np
 
 import model_compression_toolkit as mct
+from mct_quantizers import QuantizationMethod
 
 from model_compression_toolkit.gptq import get_keras_gptq_config, keras_gradient_post_training_quantization, GradientPTQConfig, RoundingType
 from model_compression_toolkit.core import QuantizationConfig, QuantizationErrorMethod, CoreConfig
 from model_compression_toolkit import DefaultDict
 import tensorflow as tf
 
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.gptq.common.gptq_config import GPTQHessianScoresConfig
 from model_compression_toolkit.gptq.common.gptq_constants import QUANT_PARAM_LEARNING_STR, MAX_LSB_STR
 from model_compression_toolkit.gptq.keras.gptq_loss import multiple_tensors_mse_loss
diff --git a/tests/keras_tests/function_tests/test_gptq_soft_quantizer.py b/tests/keras_tests/function_tests/test_gptq_soft_quantizer.py
index a5c2134e1..e224d62b7 100644
--- a/tests/keras_tests/function_tests/test_gptq_soft_quantizer.py
+++ b/tests/keras_tests/function_tests/test_gptq_soft_quantizer.py
@@ -20,8 +20,8 @@
 from tensorflow.keras.layers import Conv2D, Input
 import numpy as np
 import model_compression_toolkit as mct
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.constants import THRESHOLD, MIN_THRESHOLD
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.core.keras.constants import KERNEL
 from model_compression_toolkit.gptq.keras.quantizer.soft_rounding.symmetric_soft_quantizer import \
     SymmetricSoftRoundingGPTQ
@@ -30,7 +30,6 @@
 
 from tests.keras_tests.utils import get_layers_from_model_by_type
 
-tp = mct.target_platform
 
 
 def model_test(input_shape, num_channels=3, kernel_size=1):
diff --git a/tests/keras_tests/function_tests/test_graph_max_cut.py b/tests/keras_tests/function_tests/test_graph_max_cut.py
index 4a42402bc..780b2e3b4 100644
--- a/tests/keras_tests/function_tests/test_graph_max_cut.py
+++ b/tests/keras_tests/function_tests/test_graph_max_cut.py
@@ -26,7 +26,6 @@
 from model_compression_toolkit.core.keras.reader.reader import model_reader
 
 import model_compression_toolkit as mct
-tp = mct.target_platform
 
 
 def simple_model(input_shape):
diff --git a/tests/keras_tests/function_tests/test_hessian_info_calculator.py b/tests/keras_tests/function_tests/test_hessian_info_calculator.py
index ff07cb44d..058938a67 100644
--- a/tests/keras_tests/function_tests/test_hessian_info_calculator.py
+++ b/tests/keras_tests/function_tests/test_hessian_info_calculator.py
@@ -28,12 +28,11 @@
 from model_compression_toolkit.core.keras.data_util import data_gen_to_dataloader
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
 from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.common_tests.helpers.prep_graph_for_func_test import prepare_graph_with_configs
 
-tp = mct.target_platform
 
 
 def basic_model(input_shape, layer):
diff --git a/tests/keras_tests/function_tests/test_hessian_service.py b/tests/keras_tests/function_tests/test_hessian_service.py
index a651674e5..3b33b32c5 100644
--- a/tests/keras_tests/function_tests/test_hessian_service.py
+++ b/tests/keras_tests/function_tests/test_hessian_service.py
@@ -25,7 +25,7 @@
 from model_compression_toolkit.core.keras.data_util import data_gen_to_dataloader
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
 from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.common_tests.helpers.prep_graph_for_func_test import prepare_graph_with_configs
diff --git a/tests/keras_tests/function_tests/test_hmse_error_method.py b/tests/keras_tests/function_tests/test_hmse_error_method.py
index d64dbb5b7..6e068e575 100644
--- a/tests/keras_tests/function_tests/test_hmse_error_method.py
+++ b/tests/keras_tests/function_tests/test_hmse_error_method.py
@@ -32,7 +32,7 @@
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, KERAS_KERNEL, BIAS
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import AttributeQuantizationConfig
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
@@ -42,7 +42,6 @@
 from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.common_tests.helpers.prep_graph_for_func_test import prepare_graph_with_configs
 
-tp = mct.target_platform
 
 
 def model_gen():
@@ -137,44 +136,44 @@ def _run_node_verification(node_type):
         _run_node_verification(layers.Dense)
 
     def test_pot_threshold_selection_hmse_per_channel(self):
-        self._setup_with_args(quant_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO, per_channel=True)
+        self._setup_with_args(quant_method=mct.QuantizationMethod.POWER_OF_TWO, per_channel=True)
         calculate_quantization_params(self.graph, fw_impl=self.keras_impl, repr_data_gen_fn=representative_dataset,
                                       hessian_info_service=self.his, num_hessian_samples=1)
         self._verify_params_calculation_execution(THRESHOLD)
 
     def test_pot_threshold_selection_hmse_per_tensor(self):
-        self._setup_with_args(quant_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO, per_channel=False)
+        self._setup_with_args(quant_method=mct.QuantizationMethod.POWER_OF_TWO, per_channel=False)
         calculate_quantization_params(self.graph, fw_impl=self.keras_impl, repr_data_gen_fn=representative_dataset,
                                       hessian_info_service=self.his, num_hessian_samples=1)
         self._verify_params_calculation_execution(THRESHOLD)
 
     def test_symmetric_threshold_selection_hmse_per_channel(self):
-        self._setup_with_args(quant_method=mct.target_platform.QuantizationMethod.SYMMETRIC, per_channel=True)
+        self._setup_with_args(quant_method=mct.QuantizationMethod.SYMMETRIC, per_channel=True)
         calculate_quantization_params(self.graph, fw_impl=self.keras_impl, repr_data_gen_fn=representative_dataset,
                                       hessian_info_service=self.his, num_hessian_samples=1)
         self._verify_params_calculation_execution(THRESHOLD)
 
     def test_symmetric_threshold_selection_hmse_per_tensor(self):
-        self._setup_with_args(quant_method=mct.target_platform.QuantizationMethod.SYMMETRIC, per_channel=False)
+        self._setup_with_args(quant_method=mct.QuantizationMethod.SYMMETRIC, per_channel=False)
         calculate_quantization_params(self.graph, fw_impl=self.keras_impl, repr_data_gen_fn=representative_dataset,
                                       hessian_info_service=self.his, num_hessian_samples=1)
         self._verify_params_calculation_execution(THRESHOLD)
 
     def test_usniform_threshold_selection_hmse_per_channel(self):
-        self._setup_with_args(quant_method=mct.target_platform.QuantizationMethod.UNIFORM, per_channel=True)
+        self._setup_with_args(quant_method=mct.QuantizationMethod.UNIFORM, per_channel=True)
         calculate_quantization_params(self.graph, fw_impl=self.keras_impl, repr_data_gen_fn=representative_dataset,
                                       hessian_info_service=self.his, num_hessian_samples=1)
         self._verify_params_calculation_execution(RANGE_MAX)
 
     def test_uniform_threshold_selection_hmse_per_tensor(self):
-        self._setup_with_args(quant_method=mct.target_platform.QuantizationMethod.UNIFORM, per_channel=False)
+        self._setup_with_args(quant_method=mct.QuantizationMethod.UNIFORM, per_channel=False)
         calculate_quantization_params(self.graph, fw_impl=self.keras_impl, repr_data_gen_fn=representative_dataset,
                                       hessian_info_service=self.his, num_hessian_samples=1)
         self._verify_params_calculation_execution(RANGE_MAX)
 
     def test_threshold_selection_hmse_no_gptq(self):
         with self.assertRaises(ValueError) as e:
-            self._setup_with_args(quant_method=mct.target_platform.QuantizationMethod.SYMMETRIC, per_channel=True,
+            self._setup_with_args(quant_method=mct.QuantizationMethod.SYMMETRIC, per_channel=True,
                                   running_gptq=False)
         self.assertTrue('The HMSE error method for parameters selection is only supported when running GPTQ '
                         'optimization due to long execution time that is not suitable for basic PTQ.' in
@@ -201,7 +200,7 @@ def _generate_bn_quantization_tpc(quant_method, per_channel):
 
             return tpc
 
-        self._setup_with_args(quant_method=mct.target_platform.QuantizationMethod.SYMMETRIC, per_channel=True,
+        self._setup_with_args(quant_method=mct.QuantizationMethod.SYMMETRIC, per_channel=True,
                               tpc_fn=_generate_bn_quantization_tpc, model_gen_fn=no_bn_fusion_model_gen)
         calculate_quantization_params(self.graph, fw_impl=self.keras_impl, repr_data_gen_fn=representative_dataset,
                                       hessian_info_service=self.his, num_hessian_samples=1)
diff --git a/tests/keras_tests/function_tests/test_kl_error_quantization_configurations.py b/tests/keras_tests/function_tests/test_kl_error_quantization_configurations.py
index 4a6a4a018..52d0bbbe3 100644
--- a/tests/keras_tests/function_tests/test_kl_error_quantization_configurations.py
+++ b/tests/keras_tests/function_tests/test_kl_error_quantization_configurations.py
@@ -38,9 +38,9 @@ def test_run_quantization_config_mbv1(self):
         def representative_data_gen():
             yield [x]
 
-        quantizer_methods = [mct.target_platform.QuantizationMethod.POWER_OF_TWO,
-                             mct.target_platform.QuantizationMethod.SYMMETRIC,
-                             mct.target_platform.QuantizationMethod.UNIFORM]
+        quantizer_methods = [mct.QuantizationMethod.POWER_OF_TWO,
+                             mct.QuantizationMethod.SYMMETRIC,
+                             mct.QuantizationMethod.UNIFORM]
 
         quantization_error_methods = [mct.core.QuantizationErrorMethod.KL]
         relu_bound_to_power_of_2 = [True, False]
diff --git a/tests/keras_tests/function_tests/test_layer_fusing.py b/tests/keras_tests/function_tests/test_layer_fusing.py
index db548b188..9da236bd5 100644
--- a/tests/keras_tests/function_tests/test_layer_fusing.py
+++ b/tests/keras_tests/function_tests/test_layer_fusing.py
@@ -8,7 +8,7 @@
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
 from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import \
     get_op_quantization_configs
@@ -23,7 +23,6 @@
 keras = tf.keras
 layers = keras.layers
 activations = keras.activations
-tp = mct.target_platform
 
 INPUT_SHAPE = (16, 16, 3)
 
@@ -185,8 +184,8 @@ def test_layer_fusing_1(self):
 
         qc = QuantizationConfig(custom_tpc_opset_to_layer={"Conv": CustomOpsetLayers([Conv2D]),
                                                            "AnyReLU": CustomOpsetLayers([tf.nn.relu,
-                                                                        tp.LayerFilterParams(ReLU, negative_slope=0.0),
-                                                                        tp.LayerFilterParams(Activation, activation="relu")])})
+                                                                        LayerFilterParams(ReLU, negative_slope=0.0),
+                                                                        LayerFilterParams(Activation, activation="relu")])})
 
         fusion_graph = prepare_graph_with_configs(model, KerasImplementation(), DEFAULT_KERAS_INFO,
                                                   representative_dataset, lambda name, _tp: get_tpc_1(),
diff --git a/tests/keras_tests/function_tests/test_quant_config_filtering.py b/tests/keras_tests/function_tests/test_quant_config_filtering.py
index 20b4b33aa..519f25b19 100644
--- a/tests/keras_tests/function_tests/test_quant_config_filtering.py
+++ b/tests/keras_tests/function_tests/test_quant_config_filtering.py
@@ -22,7 +22,7 @@
     QuantizationConfigOptions
 from model_compression_toolkit.target_platform_capabilities.schema.schema_functions import \
     get_config_options_by_operators_set
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
 from tests.common_tests.helpers.generate_test_tpc import generate_custom_test_tpc
 from tests.common_tests.helpers.tpcs_for_tests.v3.tpc import get_tpc
diff --git a/tests/keras_tests/function_tests/test_quantization_configurations.py b/tests/keras_tests/function_tests/test_quantization_configurations.py
index 5b44c1c3b..bbd7fccc4 100644
--- a/tests/keras_tests/function_tests/test_quantization_configurations.py
+++ b/tests/keras_tests/function_tests/test_quantization_configurations.py
@@ -41,9 +41,9 @@ def test_run_quantization_config(self):
         def representative_data_gen():
             yield [x]
 
-        quantizer_methods = [mct.target_platform.QuantizationMethod.POWER_OF_TWO,
-                             mct.target_platform.QuantizationMethod.SYMMETRIC,
-                             mct.target_platform.QuantizationMethod.UNIFORM]
+        quantizer_methods = [mct.QuantizationMethod.POWER_OF_TWO,
+                             mct.QuantizationMethod.SYMMETRIC,
+                             mct.QuantizationMethod.UNIFORM]
 
         quantization_error_methods = [mct.core.QuantizationErrorMethod.MSE,
                                       mct.core.QuantizationErrorMethod.NOCLIPPING,
diff --git a/tests/keras_tests/function_tests/test_sensitivity_eval_non_suppoerted_output.py b/tests/keras_tests/function_tests/test_sensitivity_eval_non_suppoerted_output.py
index 7eebd5687..9a775a209 100644
--- a/tests/keras_tests/function_tests/test_sensitivity_eval_non_suppoerted_output.py
+++ b/tests/keras_tests/function_tests/test_sensitivity_eval_non_suppoerted_output.py
@@ -20,7 +20,7 @@
 from model_compression_toolkit.core import MixedPrecisionQuantizationConfig
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
 from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.common_tests.helpers.prep_graph_for_func_test import prepare_graph_with_quantization_parameters
diff --git a/tests/keras_tests/function_tests/test_sensitivity_metric_interest_points.py b/tests/keras_tests/function_tests/test_sensitivity_metric_interest_points.py
index e46b29e33..4774b4ce4 100644
--- a/tests/keras_tests/function_tests/test_sensitivity_metric_interest_points.py
+++ b/tests/keras_tests/function_tests/test_sensitivity_metric_interest_points.py
@@ -18,6 +18,9 @@
 from keras.applications.densenet import DenseNet121
 from keras.applications.mobilenet_v2 import MobileNetV2
 
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
+    AttachTpcToKeras
+
 if tf.__version__ >= "2.13":
     from keras.src.engine.input_layer import InputLayer
     from keras.src.layers.core import TFOpLambda
@@ -26,8 +29,6 @@
     from keras.layers.core import TFOpLambda
 
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
-    AttachTpcToKeras
 
 from model_compression_toolkit.constants import AXIS
 from model_compression_toolkit.core.common.mixed_precision.distance_weighting import MpDistanceWeighting
diff --git a/tests/keras_tests/function_tests/test_set_layer_to_bitwidth.py b/tests/keras_tests/function_tests/test_set_layer_to_bitwidth.py
index 29c61dc24..b6fd4560e 100644
--- a/tests/keras_tests/function_tests/test_set_layer_to_bitwidth.py
+++ b/tests/keras_tests/function_tests/test_set_layer_to_bitwidth.py
@@ -21,6 +21,8 @@
 from keras.layers import Conv2D
 
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
+    AttachTpcToKeras
 
 if tf.__version__ >= "2.13":
     from keras.src.engine.input_layer import InputLayer
@@ -29,8 +31,6 @@
 
 from mct_quantizers import KerasActivationQuantizationHolder
 from model_compression_toolkit.core import QuantizationConfig
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
-    AttachTpcToKeras
 
 from model_compression_toolkit.trainable_infrastructure import KerasTrainableQuantizationWrapper
 from model_compression_toolkit.core.common.mixed_precision.set_layer_to_bitwidth import set_layer_to_bitwidth
diff --git a/tests/keras_tests/function_tests/test_symmetric_threshold_selection_weights.py b/tests/keras_tests/function_tests/test_symmetric_threshold_selection_weights.py
index 745273eb1..c063c6188 100644
--- a/tests/keras_tests/function_tests/test_symmetric_threshold_selection_weights.py
+++ b/tests/keras_tests/function_tests/test_symmetric_threshold_selection_weights.py
@@ -22,7 +22,7 @@
 from model_compression_toolkit.core import QuantizationConfig, QuantizationErrorMethod
 from model_compression_toolkit.constants import THRESHOLD
 from model_compression_toolkit.core.keras.constants import KERNEL
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
@@ -58,7 +58,7 @@ def representative_dataset():
 
 def get_tpc(per_channel):
     tp = generate_test_tpc(edit_params_dict={
-        'weights_quantization_method': mct.target_platform.QuantizationMethod.SYMMETRIC,
+        'weights_quantization_method': mct.QuantizationMethod.SYMMETRIC,
         'weights_per_channel_threshold': per_channel})
     tpc = generate_keras_tpc(name="symmetric_threshold_selection_test", tpc=tp)
 
diff --git a/tests/keras_tests/function_tests/test_uniform_range_selection_weights.py b/tests/keras_tests/function_tests/test_uniform_range_selection_weights.py
index e7087b2b6..57599b0e7 100644
--- a/tests/keras_tests/function_tests/test_uniform_range_selection_weights.py
+++ b/tests/keras_tests/function_tests/test_uniform_range_selection_weights.py
@@ -22,7 +22,7 @@
 from model_compression_toolkit.core import QuantizationConfig, QuantizationErrorMethod
 from model_compression_toolkit.constants import RANGE_MIN, RANGE_MAX
 from model_compression_toolkit.core.keras.constants import KERNEL
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
@@ -57,7 +57,7 @@ def representative_dataset():
 
 def get_tpc(per_channel):
     tp = generate_test_tpc({
-        'weights_quantization_method': mct.target_platform.QuantizationMethod.UNIFORM,
+        'weights_quantization_method': mct.QuantizationMethod.UNIFORM,
         'weights_per_channel_threshold': per_channel})
     tpc = generate_keras_tpc(name="uniform_range_selection_test", tpc=tp)
 
diff --git a/tests/keras_tests/function_tests/test_weights_activation_split_substitution.py b/tests/keras_tests/function_tests/test_weights_activation_split_substitution.py
index e3f6020cb..8eb93cd72 100644
--- a/tests/keras_tests/function_tests/test_weights_activation_split_substitution.py
+++ b/tests/keras_tests/function_tests/test_weights_activation_split_substitution.py
@@ -18,6 +18,8 @@
 import unittest
 
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
+    AttachTpcToKeras
 
 if tf.__version__ >= "2.13":
     from keras.src.layers import Conv2D, Conv2DTranspose, DepthwiseConv2D, Dense, BatchNormalization, ReLU, Input
@@ -36,15 +38,12 @@
     WeightsActivationSplit
 from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
 from model_compression_toolkit.core.common.substitutions.apply_substitutions import substitute
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
-    AttachTpcToKeras
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_op_quantization_configs
 
 import model_compression_toolkit as mct
 from tests.common_tests.helpers.prep_graph_for_func_test import prepare_graph_with_configs
 from tests.keras_tests.tpc_keras import get_tpc_with_activation_mp_keras
 
-tp = mct.target_platform
 
 INPUT_SHAPE = (8, 8, 3)
 
diff --git a/tests/keras_tests/non_parallel_tests/test_keras_tpc.py b/tests/keras_tests/non_parallel_tests/test_keras_tpc.py
index 973a6a6b5..c6e86bdbb 100644
--- a/tests/keras_tests/non_parallel_tests/test_keras_tpc.py
+++ b/tests/keras_tests/non_parallel_tests/test_keras_tpc.py
@@ -25,6 +25,13 @@
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.defaultdict import DefaultDict
 from model_compression_toolkit.core.common import BaseNode
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attribute_filter import Greater, \
+    Smaller, GreaterEq, Eq, SmallerEq, Contains
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \
+    FrameworkQuantizationCapabilities
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.operations_to_layers import \
+    OperationsSetToLayers
 from tests.common_tests.helpers.generate_test_tpc import generate_test_op_qc, generate_test_attr_configs
 
 if version.parse(tf.__version__) >= version.parse("2.13"):
@@ -36,17 +43,10 @@
 
 import model_compression_toolkit as mct
 from model_compression_toolkit.constants import TENSORFLOW
-from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import \
-    LayerFilterParams
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attribute_filter import \
-    Greater, \
-    Smaller, GreaterEq, Eq, SmallerEq, Contains
 from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL, IMX500_TP_MODEL, \
     QNNPACK_TP_MODEL, TFLITE_TP_MODEL, KERNEL_ATTR, BIAS_ATTR, KERAS_KERNEL, BIAS, WEIGHTS_N_BITS
 from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
 
-tp = mct.target_platform
 
 TEST_QC = generate_test_op_qc(**generate_test_attr_configs())
 TEST_QCO = schema.QuantizationConfigOptions(quantization_configurations=tuple([TEST_QC]))
@@ -116,7 +116,7 @@ def test_get_layers_by_op(self):
         fw_tp = FrameworkQuantizationCapabilities(hm)
         with fw_tp:
             opset_layers = [Conv2D, LayerFilterParams(ReLU, max_value=2)]
-            tp.OperationsSetToLayers('opsetA', opset_layers)
+            OperationsSetToLayers('opsetA', opset_layers)
         self.assertEqual(fw_tp.get_layers_by_opset_name('opsetA'), opset_layers)
         self.assertEqual(fw_tp.get_layers_by_opset(op_obj), opset_layers)
         self.assertEqual(fw_tp.get_layers_by_opset_name('nonExistingOpsetName'), None)
@@ -137,8 +137,8 @@ def test_get_layers_by_opconcat(self):
         with fw_tp:
             opset_layers_a = [Conv2D]
             opset_layers_b = [LayerFilterParams(ReLU, max_value=2)]
-            tp.OperationsSetToLayers('opsetA', opset_layers_a)
-            tp.OperationsSetToLayers('opsetB', opset_layers_b)
+            OperationsSetToLayers('opsetA', opset_layers_a)
+            OperationsSetToLayers('opsetB', opset_layers_b)
 
         self.assertEqual(fw_tp.get_layers_by_opset(op_concat), opset_layers_a + opset_layers_b)
 
@@ -156,8 +156,8 @@ def test_layer_attached_to_multiple_opsets(self):
         fw_tp = FrameworkQuantizationCapabilities(hm)
         with self.assertRaises(Exception) as e:
             with fw_tp:
-                tp.OperationsSetToLayers('opsetA', [Conv2D])
-                tp.OperationsSetToLayers('opsetB', [Conv2D])
+                OperationsSetToLayers('opsetA', [Conv2D])
+                OperationsSetToLayers('opsetB', [Conv2D])
         self.assertEqual('Found layer Conv2D in more than one OperatorsSet', str(e.exception))
 
     def test_filter_layer_attached_to_multiple_opsets(self):
@@ -172,8 +172,8 @@ def test_filter_layer_attached_to_multiple_opsets(self):
         fw_tp = FrameworkQuantizationCapabilities(hm)
         with self.assertRaises(Exception) as e:
             with fw_tp:
-                tp.OperationsSetToLayers('opsetA', [LayerFilterParams(Activation, activation="relu")])
-                tp.OperationsSetToLayers('opsetB', [LayerFilterParams(Activation, activation="relu")])
+                OperationsSetToLayers('opsetA', [LayerFilterParams(Activation, activation="relu")])
+                OperationsSetToLayers('opsetB', [LayerFilterParams(Activation, activation="relu")])
         self.assertEqual('Found layer Activation(activation=relu) in more than one OperatorsSet', str(e.exception))
 
     def test_qco_by_keras_layer(self):
@@ -200,13 +200,13 @@ def test_qco_by_keras_layer(self):
                                                 add_metadata=False,
                                                 name='test')
 
-        tpc_keras = tp.FrameworkQuantizationCapabilities(tpm)
+        tpc_keras = FrameworkQuantizationCapabilities(tpm)
         with tpc_keras:
-            tp.OperationsSetToLayers("conv", [Conv2D],
+            OperationsSetToLayers("conv", [Conv2D],
                                      attr_mapping={KERNEL_ATTR: DefaultDict(default_value=KERAS_KERNEL),
                                                    BIAS_ATTR: DefaultDict(default_value=BIAS)})
-            tp.OperationsSetToLayers("tanh", [tf.nn.tanh])
-            tp.OperationsSetToLayers("relu", [LayerFilterParams(Activation, activation="relu")])
+            OperationsSetToLayers("tanh", [tf.nn.tanh])
+            OperationsSetToLayers("relu", [LayerFilterParams(Activation, activation="relu")])
 
         conv_node = get_node(Conv2D(1, 1))
         tanh_node = get_node(tf.nn.tanh)
@@ -234,7 +234,7 @@ def test_qco_by_keras_layer(self):
     #                                     tpc_platform_type=None,
     #                                     operator_set=tuple([schema.OperatorsSet(name="opA")]),
     #                                     add_metadata=False)
-    #     hm_keras = tp.FrameworkQuantizationCapabilities(hm)
+    #     hm_keras = FrameworkQuantizationCapabilities(hm)
     #     with self.assertRaises(Exception) as e:
     #         with hm_keras:
     #             tp.OperationsSetToLayers("conv", [Conv2D])
@@ -259,11 +259,11 @@ def test_keras_fusing_patterns(self):
                                                fusing_patterns=tuple(fusing_patterns),
                                                add_metadata=False)
 
-        hm_keras = tp.FrameworkQuantizationCapabilities(hm)
+        hm_keras = FrameworkQuantizationCapabilities(hm)
         with hm_keras:
-            tp.OperationsSetToLayers("opA", [Conv2D])
-            tp.OperationsSetToLayers("opB", [tf.nn.tanh])
-            tp.OperationsSetToLayers("opC", [LayerFilterParams(ReLU, Greater("max_value", 7), negative_slope=0)])
+            OperationsSetToLayers("opA", [Conv2D])
+            OperationsSetToLayers("opB", [tf.nn.tanh])
+            OperationsSetToLayers("opC", [LayerFilterParams(ReLU, Greater("max_value", 7), negative_slope=0)])
 
         fusings = hm_keras.get_fusing_patterns()
         self.assertEqual(len(fusings), 2)
@@ -287,9 +287,9 @@ def test_get_default_op_qc(self):
                                                 operator_set=tuple([schema.OperatorsSet(name="opA")]),
                                                 add_metadata=False)
 
-        tpc = tp.FrameworkQuantizationCapabilities(tpm)
+        tpc = FrameworkQuantizationCapabilities(tpm)
         with tpc:
-            tp.OperationsSetToLayers("opA", [Conv2D])
+            OperationsSetToLayers("opA", [Conv2D])
 
         d_qco = tpc.get_default_op_qc()
         self.assertEqual(d_qco, TEST_QC)
diff --git a/tests/keras_tests/non_parallel_tests/test_lp_search_bitwidth.py b/tests/keras_tests/non_parallel_tests/test_lp_search_bitwidth.py
index af560beaf..f4cee33c5 100644
--- a/tests/keras_tests/non_parallel_tests/test_lp_search_bitwidth.py
+++ b/tests/keras_tests/non_parallel_tests/test_lp_search_bitwidth.py
@@ -39,7 +39,7 @@
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
 from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import \
     get_op_quantization_configs
diff --git a/tests/keras_tests/non_parallel_tests/test_tensorboard_writer.py b/tests/keras_tests/non_parallel_tests/test_tensorboard_writer.py
index abaeda1aa..120ef70a9 100644
--- a/tests/keras_tests/non_parallel_tests/test_tensorboard_writer.py
+++ b/tests/keras_tests/non_parallel_tests/test_tensorboard_writer.py
@@ -33,7 +33,7 @@
 from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_op_quantization_configs
diff --git a/tests/keras_tests/pruning_tests/test_memory_calculator.py b/tests/keras_tests/pruning_tests/test_memory_calculator.py
index 2047c34f8..0d9d56fcc 100644
--- a/tests/keras_tests/pruning_tests/test_memory_calculator.py
+++ b/tests/keras_tests/pruning_tests/test_memory_calculator.py
@@ -24,7 +24,7 @@
 
 import keras
 
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attach2keras import \
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
 
 layers = keras.layers
diff --git a/tests/keras_tests/tpc_keras.py b/tests/keras_tests/tpc_keras.py
index d73e48c0d..ed0f382b9 100644
--- a/tests/keras_tests/tpc_keras.py
+++ b/tests/keras_tests/tpc_keras.py
@@ -32,12 +32,11 @@
     generate_mixed_precision_test_tpc, generate_tpc_with_activation_mp
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 
-tp = mct.target_platform
 
 
 def get_tpc(name, weight_bits=8, activation_bits=8,
-            weights_quantization_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO,
-            activation_quantization_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO,
+            weights_quantization_method=mct.QuantizationMethod.POWER_OF_TWO,
+            activation_quantization_method=mct.QuantizationMethod.POWER_OF_TWO,
             per_channel=True):
     tpc = generate_test_tpc({'weights_n_bits': weight_bits,
                                        'activation_n_bits': activation_bits,
diff --git a/tests/keras_tests/trainable_infrastructure_tests/base_keras_trainable_infra_test.py b/tests/keras_tests/trainable_infrastructure_tests/base_keras_trainable_infra_test.py
index df4f5df5f..f739052d2 100644
--- a/tests/keras_tests/trainable_infrastructure_tests/base_keras_trainable_infra_test.py
+++ b/tests/keras_tests/trainable_infrastructure_tests/base_keras_trainable_infra_test.py
@@ -17,8 +17,7 @@
 import numpy as np
 import tensorflow as tf
 from tensorflow import TensorShape
-
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.trainable_infrastructure import KerasTrainableQuantizationWrapper
 from mct_quantizers import QuantizationTarget, mark_quantizer
 from model_compression_toolkit.trainable_infrastructure import BaseKerasTrainableQuantizer
diff --git a/tests/keras_tests/trainable_infrastructure_tests/test_keras_trainable_infra_runner.py b/tests/keras_tests/trainable_infrastructure_tests/test_keras_trainable_infra_runner.py
index 35366d02c..ef2fd206b 100644
--- a/tests/keras_tests/trainable_infrastructure_tests/test_keras_trainable_infra_runner.py
+++ b/tests/keras_tests/trainable_infrastructure_tests/test_keras_trainable_infra_runner.py
@@ -16,7 +16,7 @@
 import tensorflow as tf
 
 from model_compression_toolkit.trainable_infrastructure import TrainingMethod
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from mct_quantizers import QuantizationTarget
 from model_compression_toolkit.qat.keras.quantizer.ste_rounding.symmetric_ste import STEWeightQATQuantizer
 from model_compression_toolkit.qat.keras.quantizer.ste_rounding.uniform_ste import STEUniformWeightQATQuantizer
diff --git a/tests/keras_tests/trainable_infrastructure_tests/trainable_keras/test_keras_base_quantizer.py b/tests/keras_tests/trainable_infrastructure_tests/trainable_keras/test_keras_base_quantizer.py
index cdee473c5..252622eeb 100644
--- a/tests/keras_tests/trainable_infrastructure_tests/trainable_keras/test_keras_base_quantizer.py
+++ b/tests/keras_tests/trainable_infrastructure_tests/trainable_keras/test_keras_base_quantizer.py
@@ -14,7 +14,7 @@
 # ==============================================================================
 from typing import List, Any
 
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.trainable_infrastructure import BaseKerasTrainableQuantizer
 from model_compression_toolkit.trainable_infrastructure.common.base_trainable_quantizer import VariableGroup
 from model_compression_toolkit.trainable_infrastructure.common.trainable_quantizer_config import \
diff --git a/tests/pytorch_tests/function_tests/get_gptq_config_test.py b/tests/pytorch_tests/function_tests/get_gptq_config_test.py
index 8e4f59610..a1e8faf1a 100644
--- a/tests/pytorch_tests/function_tests/get_gptq_config_test.py
+++ b/tests/pytorch_tests/function_tests/get_gptq_config_test.py
@@ -17,16 +17,15 @@
 from torch import nn
 
 import model_compression_toolkit as mct
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.gptq import get_pytorch_gptq_config, pytorch_gradient_post_training_quantization, RoundingType
 from model_compression_toolkit.core import CoreConfig, QuantizationConfig, QuantizationErrorMethod
 from model_compression_toolkit import DefaultDict
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.gptq.common.gptq_constants import QUANT_PARAM_LEARNING_STR, MAX_LSB_STR
 from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 
-tp = mct.target_platform
 
 
 class TestModel(nn.Module):
diff --git a/tests/pytorch_tests/function_tests/layer_fusing_test.py b/tests/pytorch_tests/function_tests/layer_fusing_test.py
index 8065ce500..399b427f8 100644
--- a/tests/pytorch_tests/function_tests/layer_fusing_test.py
+++ b/tests/pytorch_tests/function_tests/layer_fusing_test.py
@@ -19,10 +19,10 @@
 
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
 from model_compression_toolkit.core import QuantizationConfig
-from model_compression_toolkit.target_platform_capabilities.target_platform import LayerFilterParams
 from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
 from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams
 from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2pytorch import \
     AttachTpcToPytorch
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import \
@@ -32,7 +32,6 @@
 
 import model_compression_toolkit as mct
 
-tp = mct.target_platform
 
 
 class BaseLayerFusingTest(BasePytorchTest):
diff --git a/tests/pytorch_tests/function_tests/test_fully_quantized_exporter.py b/tests/pytorch_tests/function_tests/test_fully_quantized_exporter.py
index 0a9cdf883..3c9b2581c 100644
--- a/tests/pytorch_tests/function_tests/test_fully_quantized_exporter.py
+++ b/tests/pytorch_tests/function_tests/test_fully_quantized_exporter.py
@@ -27,7 +27,6 @@
 import model_compression_toolkit as mct
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor
 
-tp = mct.target_platform
 
 
 class TestFullyQuantizedExporter(unittest.TestCase):
diff --git a/tests/pytorch_tests/function_tests/test_function_runner.py b/tests/pytorch_tests/function_tests/test_function_runner.py
index 0ab7e6214..788c45a34 100644
--- a/tests/pytorch_tests/function_tests/test_function_runner.py
+++ b/tests/pytorch_tests/function_tests/test_function_runner.py
@@ -14,8 +14,8 @@
 # ==============================================================================
 import unittest
 
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.gptq import RoundingType
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from tests.pytorch_tests.function_tests.bn_info_collection_test import BNInfoCollectionTest, \
     Conv2D2BNInfoCollectionTest, Conv2DBNChainInfoCollectionTest, BNChainInfoCollectionTest, \
     BNLayerInfoCollectionTest, INP2BNInfoCollectionTest
diff --git a/tests/pytorch_tests/function_tests/test_gptq_soft_quantizer.py b/tests/pytorch_tests/function_tests/test_gptq_soft_quantizer.py
index 83a6e7ac0..86a35b5e2 100644
--- a/tests/pytorch_tests/function_tests/test_gptq_soft_quantizer.py
+++ b/tests/pytorch_tests/function_tests/test_gptq_soft_quantizer.py
@@ -5,8 +5,7 @@
 
 import model_compression_toolkit as mct
 from model_compression_toolkit.constants import THRESHOLD, MIN_THRESHOLD
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
-from mct_quantizers import PytorchQuantizationWrapper
+from mct_quantizers import PytorchQuantizationWrapper, QuantizationMethod
 from model_compression_toolkit.core.pytorch.constants import KERNEL
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor
 from model_compression_toolkit.gptq.pytorch.quantizer.soft_rounding.symmetric_soft_quantizer import \
@@ -14,7 +13,6 @@
 
 from model_compression_toolkit.trainable_infrastructure import TrainableQuantizerWeightsConfig
 
-tp = mct.target_platform
 
 
 class model_test(torch.nn.Module):
diff --git a/tests/pytorch_tests/function_tests/test_pytorch_tpc.py b/tests/pytorch_tests/function_tests/test_pytorch_tpc.py
index be89034b3..0b40f03e6 100644
--- a/tests/pytorch_tests/function_tests/test_pytorch_tpc.py
+++ b/tests/pytorch_tests/function_tests/test_pytorch_tpc.py
@@ -28,18 +28,17 @@
 from model_compression_toolkit.defaultdict import DefaultDict
 from model_compression_toolkit.constants import PYTORCH
 from model_compression_toolkit.core.common import BaseNode
-from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework import \
-    LayerFilterParams
-from model_compression_toolkit.target_platform_capabilities.target_platform.targetplatform2framework.attribute_filter import \
-    Greater, Smaller, Eq
 from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL, IMX500_TP_MODEL, \
     TFLITE_TP_MODEL, QNNPACK_TP_MODEL, KERNEL_ATTR, WEIGHTS_N_BITS, PYTORCH_KERNEL, BIAS_ATTR, BIAS
 from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attribute_filter import Greater, \
+    Smaller, Eq
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \
+    FrameworkQuantizationCapabilities
 from tests.common_tests.helpers.generate_test_tpc import generate_test_op_qc, generate_test_attr_configs
 from tests.pytorch_tests.layer_tests.base_pytorch_layer_test import LayerTestModel
 
-tp = mct.target_platform
 
 TEST_QC = generate_test_op_qc(**generate_test_attr_configs())
 TEST_QCO = schema.QuantizationConfigOptions(quantization_configurations=tuple([TEST_QC]))
@@ -113,7 +112,7 @@ def test_qco_by_pytorch_layer(self):
                                                 add_metadata=False,
                                                 name='test')
 
-        tpc_pytorch = tp.FrameworkQuantizationCapabilities(tpm)
+        tpc_pytorch = FrameworkQuantizationCapabilities(tpm)
         with tpc_pytorch:
             tp.OperationsSetToLayers("conv", [torch.nn.Conv2d],
                                      attr_mapping={KERNEL_ATTR: DefaultDict(default_value=PYTORCH_KERNEL),
diff --git a/tests/pytorch_tests/function_tests/test_quantization_configurations.py b/tests/pytorch_tests/function_tests/test_quantization_configurations.py
index 4f59c267c..c43c5c67f 100644
--- a/tests/pytorch_tests/function_tests/test_quantization_configurations.py
+++ b/tests/pytorch_tests/function_tests/test_quantization_configurations.py
@@ -50,9 +50,9 @@ def test_run_quantization_config(self):
         def representative_data_gen():
             yield [x]
 
-        quantizer_methods = [mct.target_platform.QuantizationMethod.POWER_OF_TWO,
-                             mct.target_platform.QuantizationMethod.SYMMETRIC,
-                             mct.target_platform.QuantizationMethod.UNIFORM]
+        quantizer_methods = [mct.QuantizationMethod.POWER_OF_TWO,
+                             mct.QuantizationMethod.SYMMETRIC,
+                             mct.QuantizationMethod.UNIFORM]
 
         quantization_error_methods = [mct.core.QuantizationErrorMethod.MSE,
                                       mct.core.QuantizationErrorMethod.NOCLIPPING,
diff --git a/tests/pytorch_tests/model_tests/feature_models/bn_attributes_quantization_test.py b/tests/pytorch_tests/model_tests/feature_models/bn_attributes_quantization_test.py
index 2d78dd778..9e5bb2000 100644
--- a/tests/pytorch_tests/model_tests/feature_models/bn_attributes_quantization_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/bn_attributes_quantization_test.py
@@ -30,7 +30,6 @@
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 from tests.pytorch_tests.utils import get_layers_from_model_by_type
 
-tp = mct.target_platform
 
 
 def _generate_bn_quantized_tpm(quantize_linear):
diff --git a/tests/pytorch_tests/model_tests/feature_models/compute_max_cut_test.py b/tests/pytorch_tests/model_tests/feature_models/compute_max_cut_test.py
index 406957445..e72c55187 100644
--- a/tests/pytorch_tests/model_tests/feature_models/compute_max_cut_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/compute_max_cut_test.py
@@ -21,7 +21,6 @@
 from model_compression_toolkit.constants import PYTORCH
 from mct_quantizers.pytorch.metadata import get_metadata
 
-tp = mct.target_platform
 
 
 class MaxCutModel(nn.Module):
diff --git a/tests/pytorch_tests/model_tests/feature_models/const_quantization_test.py b/tests/pytorch_tests/model_tests/feature_models/const_quantization_test.py
index 56b8cdbab..3de54ff86 100644
--- a/tests/pytorch_tests/model_tests/feature_models/const_quantization_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/const_quantization_test.py
@@ -28,9 +28,7 @@
 from tests.common_tests.helpers.tensors_compare import cosine_similarity
 from tests.pytorch_tests.utils import get_layers_from_model_by_type
 from tests.common_tests.helpers.generate_test_tpc import generate_test_attr_configs, DEFAULT_WEIGHT_ATTR_CONFIG
-from mct_quantizers import PytorchQuantizationWrapper
-
-tp = mct.target_platform
+from mct_quantizers import PytorchQuantizationWrapper, QuantizationMethod
 
 
 class ConstQuantizationNet(nn.Module):
@@ -231,9 +229,8 @@ def get_core_config(self):
                                                                  {"WeightQuant": CustomOpsetLayers([torch.Tensor.expand, torch.cat])}))
 
     def get_tpc(self):
-        tp = mct.target_platform
         attr_cfg = generate_test_attr_configs()
-        base_cfg = schema.OpQuantizationConfig(activation_quantization_method=tp.QuantizationMethod.POWER_OF_TWO,
+        base_cfg = schema.OpQuantizationConfig(activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
                                                enable_activation_quantization=True,
                                                activation_n_bits=32,
                                                supported_input_activation_n_bits=32,
@@ -251,7 +248,7 @@ def get_tpc(self):
                                                default_weight_attr_config=base_cfg.default_weight_attr_config.clone_and_edit(
                                                    enable_weights_quantization=True,
                                                    weights_per_channel_threshold=False,
-                                                   weights_quantization_method=tp.QuantizationMethod.POWER_OF_TWO))
+                                                   weights_quantization_method=QuantizationMethod.POWER_OF_TWO))
         const_configuration_options = schema.QuantizationConfigOptions(quantization_configurations=tuple([const_config]))
 
         tpc = schema.TargetPlatformCapabilities(
diff --git a/tests/pytorch_tests/model_tests/feature_models/const_representation_test.py b/tests/pytorch_tests/model_tests/feature_models/const_representation_test.py
index c128d963b..70dcbb9de 100644
--- a/tests/pytorch_tests/model_tests/feature_models/const_representation_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/const_representation_test.py
@@ -22,7 +22,6 @@
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 from tests.common_tests.helpers.tensors_compare import cosine_similarity
 
-tp = mct.target_platform
 
 
 class ConstRepresentationNet(nn.Module):
diff --git a/tests/pytorch_tests/model_tests/feature_models/constant_conv_substitution_test.py b/tests/pytorch_tests/model_tests/feature_models/constant_conv_substitution_test.py
index 8558ca28e..9645422f2 100644
--- a/tests/pytorch_tests/model_tests/feature_models/constant_conv_substitution_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/constant_conv_substitution_test.py
@@ -22,7 +22,6 @@
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 import numpy as np
 
-tp = mct.target_platform
 
 
 class BaseConstantConvSubstitutionTest(BasePytorchFeatureNetworkTest):
diff --git a/tests/pytorch_tests/model_tests/feature_models/conv2d_replacement_test.py b/tests/pytorch_tests/model_tests/feature_models/conv2d_replacement_test.py
index 08f47ace3..d5fbd81db 100644
--- a/tests/pytorch_tests/model_tests/feature_models/conv2d_replacement_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/conv2d_replacement_test.py
@@ -21,7 +21,6 @@
 from model_compression_toolkit.core.common.network_editors.actions import EditRule, ReplaceLayer
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 
-tp = mct.target_platform
 
 
 def get_new_weights_for_identity_dw_conv2d_layer(weights={}, activation_quantization_params={}, **kwargs):
diff --git a/tests/pytorch_tests/model_tests/feature_models/gptq_test.py b/tests/pytorch_tests/model_tests/feature_models/gptq_test.py
index 9349e12f1..19d81240e 100644
--- a/tests/pytorch_tests/model_tests/feature_models/gptq_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/gptq_test.py
@@ -18,6 +18,7 @@
 import torch.nn as nn
 
 import model_compression_toolkit as mct
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit import DefaultDict
 from model_compression_toolkit.constants import GPTQ_HESSIAN_NUM_SAMPLES
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor, torch_tensor_to_numpy, set_model
@@ -25,14 +26,11 @@
     GPTQHessianScoresConfig, GradualActivationQuantizationConfig
 from model_compression_toolkit.gptq.common.gptq_constants import QUANT_PARAM_LEARNING_STR, MAX_LSB_STR
 from model_compression_toolkit.gptq.pytorch.gptq_loss import multiple_tensors_mse_loss
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.pytorch_tests.model_tests.base_pytorch_feature_test import BasePytorchFeatureNetworkTest
 from tests.pytorch_tests.utils import extract_model_weights
 
-tp = mct.target_platform
-
 
 class TestModel(nn.Module):
     def __init__(self):
diff --git a/tests/pytorch_tests/model_tests/feature_models/linear_collapsing_test.py b/tests/pytorch_tests/model_tests/feature_models/linear_collapsing_test.py
index 24c28eb1e..125c0deec 100644
--- a/tests/pytorch_tests/model_tests/feature_models/linear_collapsing_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/linear_collapsing_test.py
@@ -22,7 +22,6 @@
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 from tests.common_tests.helpers.tensors_compare import cosine_similarity
 
-tp = mct.target_platform
 
 
 class BaseConv2DCollapsingTest(BasePytorchFeatureNetworkTest, ABC):
diff --git a/tests/pytorch_tests/model_tests/feature_models/lut_quantizer_test.py b/tests/pytorch_tests/model_tests/feature_models/lut_quantizer_test.py
index 340920081..9a63ac6a9 100644
--- a/tests/pytorch_tests/model_tests/feature_models/lut_quantizer_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/lut_quantizer_test.py
@@ -24,7 +24,6 @@
 from tests.pytorch_tests.tpc_pytorch import get_pytorch_test_tpc_dict
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 
-tp = mct.target_platform
 
 
 def get_uniform_weights(out_channels, in_channels, kernel):
@@ -80,7 +79,7 @@ class LUTWeightsQuantizerTest(BasePytorchTest):
     We check that the weights have different values for conv1 and conv2, and that conv2 and conv3 have the same
     values.
     """
-    def __init__(self, unit_test, weights_n_bits=4, quant_method=tp.QuantizationMethod.LUT_POT_QUANTIZER):
+    def __init__(self, unit_test, weights_n_bits=4, quant_method=QuantizationMethod.LUT_POT_QUANTIZER):
         super().__init__(unit_test)
         self.weights_n_bits = weights_n_bits
         self.quant_method = quant_method
@@ -134,7 +133,7 @@ def __init__(self, unit_test, activation_n_bits=4):
     def get_tpc(self):
         return get_pytorch_test_tpc_dict(
             tpc=generate_test_tpc({"activation_n_bits": self.activation_n_bits,
-                                             "activation_quantization_method": tp.QuantizationMethod.LUT_POT_QUANTIZER}),
+                                             "activation_quantization_method": QuantizationMethod.LUT_POT_QUANTIZER}),
             test_name='lut_quantizer_test',
             ftp_name='lut_quantizer_pytorch_test')
 
diff --git a/tests/pytorch_tests/model_tests/feature_models/metadata_test.py b/tests/pytorch_tests/model_tests/feature_models/metadata_test.py
index f2accc9a4..ad88fdba3 100644
--- a/tests/pytorch_tests/model_tests/feature_models/metadata_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/metadata_test.py
@@ -28,8 +28,6 @@
 import tempfile
 import os
 
-tp = mct.target_platform
-
 
 class DummyNet(nn.Module):
     def __init__(self):
diff --git a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_activation_test.py b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_activation_test.py
index adefc81ab..dfc9edb13 100644
--- a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_activation_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_activation_test.py
@@ -24,7 +24,6 @@
 from model_compression_toolkit.core.pytorch.reader.node_holders import DummyPlaceHolder
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, PYTORCH_KERNEL, \
     BIAS
-from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities, OperationsSetToLayers
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, OperatorsSet, \
     QuantizationConfigOptions
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
diff --git a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_weights_test.py b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_weights_test.py
index 03ddf2a73..f09fb5b53 100644
--- a/tests/pytorch_tests/model_tests/feature_models/mixed_precision_weights_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/mixed_precision_weights_test.py
@@ -23,8 +23,6 @@
 from model_compression_toolkit.core.common.user_info import UserInformation
 from model_compression_toolkit.core.pytorch.constants import BIAS
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, PYTORCH_KERNEL, BIAS_ATTR
-from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities, \
-    OperationsSetToLayers
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, OperatorsSet, \
     QuantizationConfigOptions
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
@@ -35,7 +33,6 @@
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 import model_compression_toolkit as mct
 
-tp = mct.target_platform
 
 """
 This test checks the Mixed Precision feature.
diff --git a/tests/pytorch_tests/model_tests/feature_models/multi_head_attention_test.py b/tests/pytorch_tests/model_tests/feature_models/multi_head_attention_test.py
index 17c55037f..db35f94e0 100644
--- a/tests/pytorch_tests/model_tests/feature_models/multi_head_attention_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/multi_head_attention_test.py
@@ -20,7 +20,6 @@
 
 import model_compression_toolkit as mct
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
-from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 
diff --git a/tests/pytorch_tests/model_tests/feature_models/permute_substitution_test.py b/tests/pytorch_tests/model_tests/feature_models/permute_substitution_test.py
index d65b1c156..a1bb5a481 100644
--- a/tests/pytorch_tests/model_tests/feature_models/permute_substitution_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/permute_substitution_test.py
@@ -21,9 +21,6 @@
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 
 
-tp = mct.target_platform
-
-
 class BasePermuteSubstitutionTest(BasePytorchFeatureNetworkTest):
 
     def __init__(self, unit_test):
diff --git a/tests/pytorch_tests/model_tests/feature_models/qat_test.py b/tests/pytorch_tests/model_tests/feature_models/qat_test.py
index c0c697b41..43c5f3b37 100644
--- a/tests/pytorch_tests/model_tests/feature_models/qat_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/qat_test.py
@@ -94,8 +94,8 @@ def repr_datagen():
 
 class QuantizationAwareTrainingTest(BasePytorchFeatureNetworkTest):
     def __init__(self, unit_test, weight_bits=2, activation_bits=4,
-                 weights_quantization_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO,
-                 activation_quantization_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO,
+                 weights_quantization_method=mct.QuantizationMethod.POWER_OF_TWO,
+                 activation_quantization_method=mct.QuantizationMethod.POWER_OF_TWO,
                  training_method=TrainingMethod.STE,
                  finalize=False, test_loading=False):
 
diff --git a/tests/pytorch_tests/model_tests/feature_models/relu_replacement_test.py b/tests/pytorch_tests/model_tests/feature_models/relu_replacement_test.py
index 6614ee888..cd8637aec 100644
--- a/tests/pytorch_tests/model_tests/feature_models/relu_replacement_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/relu_replacement_test.py
@@ -21,8 +21,6 @@
 from model_compression_toolkit.core.common.network_editors.node_filters import NodeNameFilter, NodeTypeFilter
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 
-tp = mct.target_platform
-
 
 class Identity(torch.nn.Module):
     """
diff --git a/tests/pytorch_tests/model_tests/feature_models/reshape_substitution_test.py b/tests/pytorch_tests/model_tests/feature_models/reshape_substitution_test.py
index d0b22afd7..1c22ae819 100644
--- a/tests/pytorch_tests/model_tests/feature_models/reshape_substitution_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/reshape_substitution_test.py
@@ -21,9 +21,6 @@
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 
 
-tp = mct.target_platform
-
-
 class BaseReshapeSubstitutionTest(BasePytorchFeatureNetworkTest):
 
     def __init__(self, unit_test):
diff --git a/tests/pytorch_tests/model_tests/feature_models/residual_collapsing_test.py b/tests/pytorch_tests/model_tests/feature_models/residual_collapsing_test.py
index dc69ddf2a..39a30b511 100644
--- a/tests/pytorch_tests/model_tests/feature_models/residual_collapsing_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/residual_collapsing_test.py
@@ -22,8 +22,6 @@
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 from tests.common_tests.helpers.tensors_compare import cosine_similarity
 
-tp = mct.target_platform
-
 
 class BaseResidualCollapsingTest(BasePytorchFeatureNetworkTest):
 
diff --git a/tests/pytorch_tests/model_tests/feature_models/second_moment_correction_test.py b/tests/pytorch_tests/model_tests/feature_models/second_moment_correction_test.py
index 4ed30dbe6..79e5b1878 100644
--- a/tests/pytorch_tests/model_tests/feature_models/second_moment_correction_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/second_moment_correction_test.py
@@ -20,13 +20,12 @@
 import torch
 from torch.nn import Module
 
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.core import FrameworkInfo, CoreConfig
 from model_compression_toolkit.core.common import Graph
 from model_compression_toolkit.core.common.statistics_correction.apply_second_moment_correction_to_graph import \
     quantized_model_builder_for_second_moment_correction
 from model_compression_toolkit.core.common.visualization.tensorboard_writer import init_tensorboard_writer
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
-from model_compression_toolkit.target_platform_capabilities.target_platform import FrameworkQuantizationCapabilities
 from model_compression_toolkit.core.pytorch.constants import EPSILON_VAL, GAMMA, BETA, MOVING_MEAN, MOVING_VARIANCE
 from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
 from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
@@ -36,6 +35,8 @@
 from model_compression_toolkit.core.runner import core_runner
 from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2pytorch import \
     AttachTpcToPytorch
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \
+    FrameworkQuantizationCapabilities
 from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 from tests.pytorch_tests.tpc_pytorch import get_pytorch_test_tpc_dict
diff --git a/tests/pytorch_tests/model_tests/feature_models/symmetric_activation_test.py b/tests/pytorch_tests/model_tests/feature_models/symmetric_activation_test.py
index b91a389e4..3c2139c51 100644
--- a/tests/pytorch_tests/model_tests/feature_models/symmetric_activation_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/symmetric_activation_test.py
@@ -17,8 +17,8 @@
 import torch
 
 import model_compression_toolkit as mct
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.constants import THRESHOLD
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.core.common.user_info import UserInformation
 from model_compression_toolkit.core.pytorch.utils import to_torch_tensor
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
diff --git a/tests/pytorch_tests/model_tests/feature_models/tpc_test.py b/tests/pytorch_tests/model_tests/feature_models/tpc_test.py
index aa65d3e66..bdebfb5b9 100644
--- a/tests/pytorch_tests/model_tests/feature_models/tpc_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/tpc_test.py
@@ -27,8 +27,6 @@
 import tempfile
 import os
 
-tp = mct.target_platform
-
 
 class DummyNet(nn.Module):
     def __init__(self):
diff --git a/tests/pytorch_tests/model_tests/feature_models/uniform_activation_test.py b/tests/pytorch_tests/model_tests/feature_models/uniform_activation_test.py
index b55eec63e..57b9e2285 100644
--- a/tests/pytorch_tests/model_tests/feature_models/uniform_activation_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/uniform_activation_test.py
@@ -17,7 +17,7 @@
 import torch
 
 import model_compression_toolkit as mct
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.core.common.user_info import UserInformation
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
diff --git a/tests/pytorch_tests/model_tests/test_feature_models_runner.py b/tests/pytorch_tests/model_tests/test_feature_models_runner.py
index 685403b42..c314e5048 100644
--- a/tests/pytorch_tests/model_tests/test_feature_models_runner.py
+++ b/tests/pytorch_tests/model_tests/test_feature_models_runner.py
@@ -26,7 +26,6 @@
 from model_compression_toolkit.gptq.common.gptq_config import RoundingType
 from model_compression_toolkit.gptq.pytorch.gptq_loss import sample_layer_attention_loss
 from model_compression_toolkit.target_platform_capabilities import constants as C
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
 from model_compression_toolkit.trainable_infrastructure import TrainingMethod
 from tests.pytorch_tests.model_tests.feature_models.activation_16bit_test import Activation16BitTest, \
     Activation16BitMixedPrecisionTest
@@ -416,7 +415,7 @@ def test_lut_weights_quantizer(self):
         values.
         """
         LUTWeightsQuantizerTest(self).run_test()
-        LUTWeightsQuantizerTest(self, quant_method=mct.target_platform.QuantizationMethod.LUT_SYM_QUANTIZER).run_test()
+        LUTWeightsQuantizerTest(self, quant_method=mct.QuantizationMethod.LUT_SYM_QUANTIZER).run_test()
 
     def test_lut_activation_quantizer(self):
         """
@@ -732,7 +731,7 @@ def test_qat(self):
         """
         QuantizationAwareTrainingTest(self).run_test()
         QuantizationAwareTrainingTest(self, finalize=True).run_test()
-        _method = mct.target_platform.QuantizationMethod.SYMMETRIC
+        _method = mct.QuantizationMethod.SYMMETRIC
         QuantizationAwareTrainingTest(self,
                                       weights_quantization_method=_method,
                                       activation_quantization_method=_method
@@ -741,7 +740,7 @@ def test_qat(self):
                                       weights_quantization_method=_method,
                                       activation_quantization_method=_method,
                                       finalize=True).run_test()
-        _method = mct.target_platform.QuantizationMethod.UNIFORM
+        _method = mct.QuantizationMethod.UNIFORM
         QuantizationAwareTrainingTest(self,
                                       weights_quantization_method=_method,
                                       activation_quantization_method=_method
@@ -751,18 +750,18 @@ def test_qat(self):
                                       activation_quantization_method=_method,
                                       finalize=True).run_test()
         QuantizationAwareTrainingTest(self,
-                                      weights_quantization_method=mct.target_platform.QuantizationMethod.SYMMETRIC,
-                                      activation_quantization_method=mct.target_platform.QuantizationMethod.SYMMETRIC,
+                                      weights_quantization_method=mct.QuantizationMethod.SYMMETRIC,
+                                      activation_quantization_method=mct.QuantizationMethod.SYMMETRIC,
                                       training_method=TrainingMethod.LSQ,
                                       finalize=True).run_test()
         QuantizationAwareTrainingTest(self,
-                                      weights_quantization_method=mct.target_platform.QuantizationMethod.UNIFORM,
-                                      activation_quantization_method=mct.target_platform.QuantizationMethod.UNIFORM,
+                                      weights_quantization_method=mct.QuantizationMethod.UNIFORM,
+                                      activation_quantization_method=mct.QuantizationMethod.UNIFORM,
                                       training_method=TrainingMethod.LSQ,
                                       finalize=True).run_test()
         QuantizationAwareTrainingTest(self,
-                                      weights_quantization_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO,
-                                      activation_quantization_method=mct.target_platform.QuantizationMethod.POWER_OF_TWO,
+                                      weights_quantization_method=mct.QuantizationMethod.POWER_OF_TWO,
+                                      activation_quantization_method=mct.QuantizationMethod.POWER_OF_TWO,
                                       training_method=TrainingMethod.LSQ,
                                       finalize=True).run_test()
         QuantizationAwareTrainingQuantizerHolderTest(self).run_test()
diff --git a/tests/pytorch_tests/tpc_pytorch.py b/tests/pytorch_tests/tpc_pytorch.py
index d4aac9470..c31f60b12 100644
--- a/tests/pytorch_tests/tpc_pytorch.py
+++ b/tests/pytorch_tests/tpc_pytorch.py
@@ -16,8 +16,6 @@
 import model_compression_toolkit as mct
 from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 
-tp = mct.target_platform
-
 
 def get_pytorch_test_tpc_dict(tpc, test_name, ftp_name):
     return {
diff --git a/tests/pytorch_tests/trainable_infrastructure_tests/base_pytorch_trainable_infra_test.py b/tests/pytorch_tests/trainable_infrastructure_tests/base_pytorch_trainable_infra_test.py
index 8baa2fcf4..d1076e6a8 100644
--- a/tests/pytorch_tests/trainable_infrastructure_tests/base_pytorch_trainable_infra_test.py
+++ b/tests/pytorch_tests/trainable_infrastructure_tests/base_pytorch_trainable_infra_test.py
@@ -19,7 +19,7 @@
 import torch
 import torch.nn as nn
 
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from mct_quantizers import mark_quantizer, QuantizationTarget, PytorchQuantizationWrapper
 from model_compression_toolkit.trainable_infrastructure.common.trainable_quantizer_config import \
     TrainableQuantizerWeightsConfig, TrainableQuantizerActivationConfig
diff --git a/tests/pytorch_tests/trainable_infrastructure_tests/test_pytorch_trainable_infra_runner.py b/tests/pytorch_tests/trainable_infrastructure_tests/test_pytorch_trainable_infra_runner.py
index e62dd06d7..a95ee4644 100644
--- a/tests/pytorch_tests/trainable_infrastructure_tests/test_pytorch_trainable_infra_runner.py
+++ b/tests/pytorch_tests/trainable_infrastructure_tests/test_pytorch_trainable_infra_runner.py
@@ -16,7 +16,7 @@
 
 import unittest
 
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from mct_quantizers import QuantizationTarget
 from model_compression_toolkit.qat.pytorch.quantizer.ste_rounding.symmetric_ste import STEWeightQATQuantizer
 from model_compression_toolkit.trainable_infrastructure import TrainingMethod
diff --git a/tests/pytorch_tests/trainable_infrastructure_tests/trainable_pytorch/test_pytorch_base_quantizer.py b/tests/pytorch_tests/trainable_infrastructure_tests/trainable_pytorch/test_pytorch_base_quantizer.py
index 461080e86..19edf364c 100644
--- a/tests/pytorch_tests/trainable_infrastructure_tests/trainable_pytorch/test_pytorch_base_quantizer.py
+++ b/tests/pytorch_tests/trainable_infrastructure_tests/trainable_pytorch/test_pytorch_base_quantizer.py
@@ -24,7 +24,7 @@
     BasePytorchTrainableQuantizer
 from tests.pytorch_tests.trainable_infrastructure_tests.base_pytorch_trainable_infra_test import \
     BasePytorchInfrastructureTest, ZeroWeightsQuantizer, ZeroActivationsQuantizer
-from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.trainable_infrastructure.pytorch.activation_quantizers import (
     STESymmetricActivationTrainableQuantizer, STEUniformActivationTrainableQuantizer)
 

From 6e49aa0597c7439f66823647d04a89afcfd828b8 Mon Sep 17 00:00:00 2001
From: liord <lior.dikstein@altair-semi.com>
Date: Mon, 13 Jan 2025 14:59:45 +0200
Subject: [PATCH 10/18] Remove folder "target_platform"

---
 .../keras/resource_utilization_data_facade.py |  4 +-
 .../pruning/keras/pruning_facade.py           |  4 +-
 .../ptq/keras/quantization_facade.py          |  4 +-
 .../attach2pytorch.py                         |  1 +
 .../helpers/tpcs_for_tests/v1/tpc.py          |  1 +
 .../helpers/tpcs_for_tests/v1_lut/tpc.py      |  1 +
 .../helpers/tpcs_for_tests/v1_pot/tpc.py      |  1 +
 .../helpers/tpcs_for_tests/v2_lut/tpc.py      |  1 +
 .../helpers/tpcs_for_tests/v3_lut/tpc.py      |  1 +
 .../helpers/tpcs_for_tests/v4/tpc.py          |  1 +
 .../network_editor/node_filter_test.py        |  2 +-
 ...ric_threshold_selection_activation_test.py |  2 +-
 ...uniform_range_selection_activation_test.py |  2 +-
 .../function_tests/test_pytorch_tpc.py        | 37 ++++++++++---------
 .../feature_models/lut_quantizer_test.py      |  1 +
 15 files changed, 36 insertions(+), 27 deletions(-)

diff --git a/model_compression_toolkit/core/keras/resource_utilization_data_facade.py b/model_compression_toolkit/core/keras/resource_utilization_data_facade.py
index 87b4a06db..c1c127094 100644
--- a/model_compression_toolkit/core/keras/resource_utilization_data_facade.py
+++ b/model_compression_toolkit/core/keras/resource_utilization_data_facade.py
@@ -20,11 +20,11 @@
 from model_compression_toolkit.constants import TENSORFLOW
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization_data import compute_resource_utilization_data
-from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
-    AttachTpcToKeras
 from model_compression_toolkit.verify_packages import FOUND_TF
 
 if FOUND_TF:
+    from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
+        AttachTpcToKeras
     from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
     from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
     from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
diff --git a/model_compression_toolkit/pruning/keras/pruning_facade.py b/model_compression_toolkit/pruning/keras/pruning_facade.py
index b6e3cdc0b..70162b9c9 100644
--- a/model_compression_toolkit/pruning/keras/pruning_facade.py
+++ b/model_compression_toolkit/pruning/keras/pruning_facade.py
@@ -18,8 +18,6 @@
 from model_compression_toolkit import get_target_platform_capabilities
 from model_compression_toolkit.constants import TENSORFLOW
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
-from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
-    AttachTpcToKeras
 from model_compression_toolkit.verify_packages import FOUND_TF
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization
 from model_compression_toolkit.core.common.pruning.pruner import Pruner
@@ -32,6 +30,8 @@
 from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL
 
 if FOUND_TF:
+    from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
+        AttachTpcToKeras
     from model_compression_toolkit.core.keras.back2framework.float_model_builder import FloatKerasModelBuilder
     from model_compression_toolkit.core.keras.pruning.pruning_keras_implementation import PruningKerasImplementation
     from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
diff --git a/model_compression_toolkit/ptq/keras/quantization_facade.py b/model_compression_toolkit/ptq/keras/quantization_facade.py
index 3c93b8f37..1a65525a5 100644
--- a/model_compression_toolkit/ptq/keras/quantization_facade.py
+++ b/model_compression_toolkit/ptq/keras/quantization_facade.py
@@ -23,8 +23,6 @@
 from model_compression_toolkit.logger import Logger
 from model_compression_toolkit.constants import TENSORFLOW
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
-from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
-    AttachTpcToKeras
 from model_compression_toolkit.verify_packages import FOUND_TF
 from model_compression_toolkit.core.common.mixed_precision.resource_utilization_tools.resource_utilization import ResourceUtilization
 from model_compression_toolkit.core.common.mixed_precision.mixed_precision_quantization_config import \
@@ -34,6 +32,8 @@
 from model_compression_toolkit.metadata import create_model_metadata
 
 if FOUND_TF:
+    from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
+        AttachTpcToKeras
     from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
     from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
     from model_compression_toolkit.core.keras.keras_model_validation import KerasModelValidation
diff --git a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2pytorch.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2pytorch.py
index e7347cd81..4991fea86 100644
--- a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2pytorch.py
+++ b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/attach2pytorch.py
@@ -31,6 +31,7 @@
 from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams
 from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2fw import \
     AttachTpcToFramework
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attribute_filter import Eq
 
 
 class AttachTpcToPytorch(AttachTpcToFramework):
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v1/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v1/tpc.py
index 3c6e8fcfe..da37d434f 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v1/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v1/tpc.py
@@ -16,6 +16,7 @@
 
 import model_compression_toolkit as mct
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
     IMX500_TP_MODEL
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tpc.py
index 89aac31ee..fd73f8297 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v1_lut/tpc.py
@@ -16,6 +16,7 @@
 
 import model_compression_toolkit as mct
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
     WEIGHTS_QUANTIZATION_METHOD, IMX500_TP_MODEL
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tpc.py
index 18ce2262e..55a4de0d7 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v1_pot/tpc.py
@@ -16,6 +16,7 @@
 
 import model_compression_toolkit as mct
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
     IMX500_TP_MODEL
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tpc.py
index 20d79d9fd..6150f47b2 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v2_lut/tpc.py
@@ -16,6 +16,7 @@
 
 import model_compression_toolkit as mct
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
     WEIGHTS_QUANTIZATION_METHOD, IMX500_TP_MODEL
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tpc.py
index 2c41ad629..9ea9dbaf6 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v3_lut/tpc.py
@@ -16,6 +16,7 @@
 
 import model_compression_toolkit as mct
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
     WEIGHTS_QUANTIZATION_METHOD, IMX500_TP_MODEL
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v4/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v4/tpc.py
index 5d30ce04b..f573ebba0 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v4/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v4/tpc.py
@@ -16,6 +16,7 @@
 
 import model_compression_toolkit as mct
 import model_compression_toolkit.target_platform_capabilities.schema.v1 as schema
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
     IMX500_TP_MODEL
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/node_filter_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/node_filter_test.py
index afcd50a06..a3ad1fbd6 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/node_filter_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/network_editor/node_filter_test.py
@@ -16,7 +16,7 @@
 import tensorflow as tf
 
 import model_compression_toolkit as mct
-from mct_quantizers import KerasActivationQuantizationHolder
+from mct_quantizers import KerasActivationQuantizationHolder, QuantizationMethod
 from model_compression_toolkit.core.common.network_editors.actions import ChangeCandidatesActivationQuantConfigAttr, \
     ChangeQuantizationParamFunction, EditRule, ChangeCandidatesWeightsQuantConfigAttr
 from model_compression_toolkit.core.common.network_editors.node_filters import NodeNameFilter, NodeNameScopeFilter, \
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/symmetric_threshold_selection_activation_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/symmetric_threshold_selection_activation_test.py
index 0801c6158..0e59acf3c 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/symmetric_threshold_selection_activation_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/symmetric_threshold_selection_activation_test.py
@@ -17,7 +17,7 @@
 import tensorflow as tf
 import numpy as np
 
-from mct_quantizers import KerasActivationQuantizationHolder
+from mct_quantizers import KerasActivationQuantizationHolder, QuantizationMethod
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/uniform_range_selection_activation_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/uniform_range_selection_activation_test.py
index cc43072d0..aa5e21308 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/uniform_range_selection_activation_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/uniform_range_selection_activation_test.py
@@ -18,7 +18,7 @@
 import numpy as np
 
 
-from mct_quantizers import KerasActivationQuantizationHolder
+from mct_quantizers import KerasActivationQuantizationHolder, QuantizationMethod
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
diff --git a/tests/pytorch_tests/function_tests/test_pytorch_tpc.py b/tests/pytorch_tests/function_tests/test_pytorch_tpc.py
index 0b40f03e6..404669279 100644
--- a/tests/pytorch_tests/function_tests/test_pytorch_tpc.py
+++ b/tests/pytorch_tests/function_tests/test_pytorch_tpc.py
@@ -31,7 +31,8 @@
 from model_compression_toolkit.target_platform_capabilities.constants import DEFAULT_TP_MODEL, IMX500_TP_MODEL, \
     TFLITE_TP_MODEL, QNNPACK_TP_MODEL, KERNEL_ATTR, WEIGHTS_N_BITS, PYTORCH_KERNEL, BIAS_ATTR, BIAS
 from model_compression_toolkit.core.pytorch.pytorch_implementation import PytorchImplementation
-from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams, \
+    OperationsSetToLayers
 from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attribute_filter import Greater, \
     Smaller, Eq
 from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import \
@@ -114,13 +115,13 @@ def test_qco_by_pytorch_layer(self):
 
         tpc_pytorch = FrameworkQuantizationCapabilities(tpm)
         with tpc_pytorch:
-            tp.OperationsSetToLayers("conv", [torch.nn.Conv2d],
+            OperationsSetToLayers("conv", [torch.nn.Conv2d],
                                      attr_mapping={KERNEL_ATTR: DefaultDict(default_value=PYTORCH_KERNEL),
                                                    BIAS_ATTR: DefaultDict(default_value=BIAS)})
-            tp.OperationsSetToLayers("tanh", [torch.tanh])
-            tp.OperationsSetToLayers("avg_pool2d_kernel_2",
+            OperationsSetToLayers("tanh", [torch.tanh])
+            OperationsSetToLayers("avg_pool2d_kernel_2",
                                      [LayerFilterParams(torch.nn.functional.avg_pool2d, kernel_size=2)])
-            tp.OperationsSetToLayers("avg_pool2d",
+            OperationsSetToLayers("avg_pool2d",
                                      [torch.nn.functional.avg_pool2d])
 
         conv_node = get_node(torch.nn.Conv2d(3, 3, (1, 1)))
@@ -157,7 +158,7 @@ def test_get_layers_by_op(self):
         fw_tp = FrameworkQuantizationCapabilities(hm)
         with fw_tp:
             opset_layers = [torch.nn.Conv2d, LayerFilterParams(torch.nn.Softmax, dim=1)]
-            tp.OperationsSetToLayers('opsetA', opset_layers)
+            OperationsSetToLayers('opsetA', opset_layers)
         self.assertEqual(fw_tp.get_layers_by_opset_name('opsetA'), opset_layers)
         self.assertEqual(fw_tp.get_layers_by_opset(op_obj), opset_layers)
 
@@ -178,8 +179,8 @@ def test_get_layers_by_opconcat(self):
         with fw_tp:
             opset_layers_a = [torch.nn.Conv2d]
             opset_layers_b = [LayerFilterParams(torch.nn.Softmax, dim=1)]
-            tp.OperationsSetToLayers('opsetA', opset_layers_a)
-            tp.OperationsSetToLayers('opsetB', opset_layers_b)
+            OperationsSetToLayers('opsetA', opset_layers_a)
+            OperationsSetToLayers('opsetB', opset_layers_b)
 
         self.assertEqual(fw_tp.get_layers_by_opset(op_concat), opset_layers_a + opset_layers_b)
 
@@ -197,8 +198,8 @@ def test_layer_attached_to_multiple_opsets(self):
         fw_tp = FrameworkQuantizationCapabilities(hm)
         with self.assertRaises(Exception) as e:
             with fw_tp:
-                tp.OperationsSetToLayers('opsetA', [torch.nn.Conv2d])
-                tp.OperationsSetToLayers('opsetB', [torch.nn.Conv2d])
+                OperationsSetToLayers('opsetA', [torch.nn.Conv2d])
+                OperationsSetToLayers('opsetB', [torch.nn.Conv2d])
         self.assertEqual('Found layer Conv2d in more than one OperatorsSet', str(e.exception))
 
     def test_filter_layer_attached_to_multiple_opsets(self):
@@ -214,8 +215,8 @@ def test_filter_layer_attached_to_multiple_opsets(self):
         fw_tp = FrameworkQuantizationCapabilities(hm)
         with self.assertRaises(Exception) as e:
             with fw_tp:
-                tp.OperationsSetToLayers('opsetA', [LayerFilterParams(torch.nn.Softmax, dim=2)])
-                tp.OperationsSetToLayers('opsetB', [LayerFilterParams(torch.nn.Softmax, dim=2)])
+                OperationsSetToLayers('opsetA', [LayerFilterParams(torch.nn.Softmax, dim=2)])
+                OperationsSetToLayers('opsetB', [LayerFilterParams(torch.nn.Softmax, dim=2)])
         self.assertEqual('Found layer Softmax(dim=2) in more than one OperatorsSet', str(e.exception))
 
     # TODO: need to test as part of attach to fw tests
@@ -227,10 +228,10 @@ def test_filter_layer_attached_to_multiple_opsets(self):
     #                                     tpc_platform_type=None,
     #                                     operator_set=tuple([schema.OperatorsSet(name="opA")]),
     #                                     add_metadata=False)
-    #     hm_pytorch = tp.FrameworkQuantizationCapabilities(hm)
+    #     hm_pytorch = FrameworkQuantizationCapabilities(hm)
     #     with self.assertRaises(Exception) as e:
     #         with hm_pytorch:
-    #             tp.OperationsSetToLayers("conv", [torch.nn.Conv2d])
+    #             OperationsSetToLayers("conv", [torch.nn.Conv2d])
     #     self.assertEqual(
     #         'conv is not defined in the target platform model that is associated with the target platform capabilities.',
     #         str(e.exception))
@@ -252,11 +253,11 @@ def test_pytorch_fusing_patterns(self):
                                                fusing_patterns=tuple(fusing_patterns),
                                                add_metadata=False)
 
-        hm_keras = tp.FrameworkQuantizationCapabilities(hm)
+        hm_keras = FrameworkQuantizationCapabilities(hm)
         with hm_keras:
-            tp.OperationsSetToLayers("opA", [torch.conv2d])
-            tp.OperationsSetToLayers("opB", [torch.tanh])
-            tp.OperationsSetToLayers("opC", [LayerFilterParams(torch.relu, Greater("max_value", 7), negative_slope=0)])
+            OperationsSetToLayers("opA", [torch.conv2d])
+            OperationsSetToLayers("opB", [torch.tanh])
+            OperationsSetToLayers("opC", [LayerFilterParams(torch.relu, Greater("max_value", 7), negative_slope=0)])
 
         fusings = hm_keras.get_fusing_patterns()
         self.assertEqual(len(fusings), 2)
diff --git a/tests/pytorch_tests/model_tests/feature_models/lut_quantizer_test.py b/tests/pytorch_tests/model_tests/feature_models/lut_quantizer_test.py
index 9a63ac6a9..2eb8a5e3f 100644
--- a/tests/pytorch_tests/model_tests/feature_models/lut_quantizer_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/lut_quantizer_test.py
@@ -15,6 +15,7 @@
 import torch
 import numpy as np
 import model_compression_toolkit as mct
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.core.common.network_editors.node_filters import NodeNameFilter
 from model_compression_toolkit.core.common.network_editors.actions import EditRule, \
     ChangeCandidatesWeightsQuantizationMethod

From 3603adfc5f13c1809ca35c61be2ecb98d2f523b5 Mon Sep 17 00:00:00 2001
From: liord <lior.dikstein@altair-semi.com>
Date: Mon, 13 Jan 2025 15:51:31 +0200
Subject: [PATCH 11/18] Fix broken links

---
 .../target_platform_capabilities/README.md                    | 4 ++--
 .../feature_networks/mixed_precision_tests.py                 | 1 +
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/model_compression_toolkit/target_platform_capabilities/README.md b/model_compression_toolkit/target_platform_capabilities/README.md
index fc8d973b7..782a166bd 100644
--- a/model_compression_toolkit/target_platform_capabilities/README.md
+++ b/model_compression_toolkit/target_platform_capabilities/README.md
@@ -21,9 +21,9 @@ Currently, MCT contains three target-platform models
 The default target-platform model is [IMX500](https://developer.sony.com/develop/imx500/), quantizes activations using 8 bits with power-of-two thresholds for 
 activations and symmetric threshold for weights.
 For mixed-precision quantization it uses either 2, 4, or 8 bits for quantizing the operators.
-One may view the full default target-platform model and its parameters [here](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc.py).
+One may view the full default target-platform model and its parameters [here](model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc.py).
 
-[TFLite](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py) and [QNNPACK](https://github.com/sony/model_optimization/blob/main/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py) models were created similarly and were used to create two TPCs: One for Keras TPC and one for PyTorch TPC (for each model, this 8 in total).
+[TFLite](model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py) and [QNNPACK](model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py) models were created similarly and were used to create two TPCs: One for Keras TPC and one for PyTorch TPC (for each model, this 8 in total).
 
 ## Usage
 
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision_tests.py b/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision_tests.py
index 15d2f71f6..f2eb37a8d 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision_tests.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/mixed_precision_tests.py
@@ -25,6 +25,7 @@
 from model_compression_toolkit.core.keras.constants import SIGMOID, SOFTMAX, BIAS
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, KERAS_KERNEL
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams
 from tests.common_tests.helpers.generate_test_tpc import generate_test_op_qc, generate_test_attr_configs
 from tests.keras_tests.exporter_tests.tflite_int8.imx500_int8_tpc import get_op_quantization_configs
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest

From bac8f06a7ef80d1b6bfcbe5833b9bfa520820ff5 Mon Sep 17 00:00:00 2001
From: Ofir Gordon <ofirgo@sony.com>
Date: Mon, 13 Jan 2025 16:17:48 +0200
Subject: [PATCH 12/18] fix imports and broken link

---
 model_compression_toolkit/__init__.py                    | 1 +
 .../target_platform_capabilities/README.md               | 4 ++--
 .../target_platform_capabilities/__init__.py             | 9 +++++++++
 .../targetplatform2framework/__init__.py                 | 2 ++
 tests/common_tests/helpers/generate_test_tpc.py          | 1 -
 tests/common_tests/helpers/prep_graph_for_func_test.py   | 2 --
 6 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/model_compression_toolkit/__init__.py b/model_compression_toolkit/__init__.py
index 723328b9b..0ccd57d4f 100644
--- a/model_compression_toolkit/__init__.py
+++ b/model_compression_toolkit/__init__.py
@@ -14,6 +14,7 @@
 # ==============================================================================
 
 from model_compression_toolkit.defaultdict import DefaultDict
+from model_compression_toolkit import target_platform_capabilities as target_platform
 from model_compression_toolkit.target_platform_capabilities.tpc_models.get_target_platform_capabilities import get_target_platform_capabilities
 from model_compression_toolkit import core
 from model_compression_toolkit.logger import set_log_folder
diff --git a/model_compression_toolkit/target_platform_capabilities/README.md b/model_compression_toolkit/target_platform_capabilities/README.md
index 782a166bd..560e99b58 100644
--- a/model_compression_toolkit/target_platform_capabilities/README.md
+++ b/model_compression_toolkit/target_platform_capabilities/README.md
@@ -21,9 +21,9 @@ Currently, MCT contains three target-platform models
 The default target-platform model is [IMX500](https://developer.sony.com/develop/imx500/), quantizes activations using 8 bits with power-of-two thresholds for 
 activations and symmetric threshold for weights.
 For mixed-precision quantization it uses either 2, 4, or 8 bits for quantizing the operators.
-One may view the full default target-platform model and its parameters [here](model_compression_toolkit/target_platform_capabilities/tpc_models/imx500_tpc/v1/tpc.py).
+One may view the full default target-platform model and its parameters [here](./tpc_models/imx500_tpc/v1/tpc.py).
 
-[TFLite](model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py) and [QNNPACK](model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py) models were created similarly and were used to create two TPCs: One for Keras TPC and one for PyTorch TPC (for each model, this 8 in total).
+[TFLite](./tpc_models/tflite_tpc/v1/tpc.py) and [QNNPACK](./tpc_models/qnnpack_tpc/v1/tpc.py) models were created similarly and were used to create two TPCs: One for Keras TPC and one for PyTorch TPC (for each model, this 8 in total).
 
 ## Usage
 
diff --git a/model_compression_toolkit/target_platform_capabilities/__init__.py b/model_compression_toolkit/target_platform_capabilities/__init__.py
index feb1a4cc5..d3bb404e2 100644
--- a/model_compression_toolkit/target_platform_capabilities/__init__.py
+++ b/model_compression_toolkit/target_platform_capabilities/__init__.py
@@ -12,3 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
+
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attribute_filter import AttributeFilter
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import (
+    FrameworkQuantizationCapabilities, OperationsSetToLayers, Smaller, SmallerEq, NotEq, Eq, GreaterEq, Greater,
+    LayerFilterParams, OperationsToLayers, get_current_tpc)
+from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, OperatorsSet, \
+    OperatorSetGroup, Signedness, AttributeQuantizationConfig, OpQuantizationConfig, QuantizationConfigOptions, Fusing
+
+from mct_quantizers import QuantizationMethod
\ No newline at end of file
diff --git a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/__init__.py b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/__init__.py
index 02e60eb6a..2b5fa4f84 100644
--- a/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/__init__.py
+++ b/model_compression_toolkit/target_platform_capabilities/targetplatform2framework/__init__.py
@@ -17,6 +17,8 @@
 from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.framework_quantization_capabilities import FrameworkQuantizationCapabilities
 from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.layer_filter_params import \
     LayerFilterParams
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attribute_filter import \
+    Eq, GreaterEq, NotEq, SmallerEq, Greater, Smaller
 from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.operations_to_layers import \
     OperationsToLayers, OperationsSetToLayers
 
diff --git a/tests/common_tests/helpers/generate_test_tpc.py b/tests/common_tests/helpers/generate_test_tpc.py
index bb95c2e31..94920f253 100644
--- a/tests/common_tests/helpers/generate_test_tpc.py
+++ b/tests/common_tests/helpers/generate_test_tpc.py
@@ -26,7 +26,6 @@
 from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import \
     FrameworkQuantizationCapabilities
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import get_op_quantization_configs, generate_tpc
-import model_compression_toolkit as mct
 
 
 DEFAULT_WEIGHT_ATTR_CONFIG = 'default_weight_attr_config'
diff --git a/tests/common_tests/helpers/prep_graph_for_func_test.py b/tests/common_tests/helpers/prep_graph_for_func_test.py
index d77158615..04cbb047e 100644
--- a/tests/common_tests/helpers/prep_graph_for_func_test.py
+++ b/tests/common_tests/helpers/prep_graph_for_func_test.py
@@ -27,8 +27,6 @@
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_tpc, \
     get_op_quantization_configs
 
-import model_compression_toolkit as mct
-
 
 def prepare_graph_with_configs(in_model,
                                fw_impl,

From a9e2badf21c01817457770c630a1b00974244327 Mon Sep 17 00:00:00 2001
From: Ofir Gordon <ofirgo@sony.com>
Date: Mon, 13 Jan 2025 17:05:35 +0200
Subject: [PATCH 13/18] last import alignment

---
 model_compression_toolkit/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/model_compression_toolkit/__init__.py b/model_compression_toolkit/__init__.py
index 0ccd57d4f..28ec7a4c1 100644
--- a/model_compression_toolkit/__init__.py
+++ b/model_compression_toolkit/__init__.py
@@ -14,7 +14,7 @@
 # ==============================================================================
 
 from model_compression_toolkit.defaultdict import DefaultDict
-from model_compression_toolkit import target_platform_capabilities as target_platform
+from model_compression_toolkit import target_platform_capabilities
 from model_compression_toolkit.target_platform_capabilities.tpc_models.get_target_platform_capabilities import get_target_platform_capabilities
 from model_compression_toolkit import core
 from model_compression_toolkit.logger import set_log_folder

From 6b90aaa7d1353bced8648cb80140d7e610ea2e0f Mon Sep 17 00:00:00 2001
From: liord <lior.dikstein@altair-semi.com>
Date: Mon, 13 Jan 2025 14:17:30 +0200
Subject: [PATCH 14/18] Remove folder "target_platform"

---
 .../tpc_models/qnnpack_tpc/v1/tpc.py          |  1 +
 .../tpc_models/tflite_tpc/v1/tpc.py           |  1 +
 .../helpers/tpcs_for_tests/v2/tpc.py          |  1 +
 .../helpers/tpcs_for_tests/v3/tpc.py          |  1 +
 .../const_quantization_test.py                |  3 +--
 .../feature_networks/lut_quantizer.py         |  4 ++--
 .../feature_networks/qat/qat_test.py          | 11 +++++-----
 .../function_tests/test_custom_layer.py       |  1 +
 .../function_tests/test_hmse_error_method.py  | 17 ++++++++-------
 ...st_kl_error_quantization_configurations.py |  7 ++++---
 .../function_tests/test_layer_fusing.py       | 21 ++++++++++---------
 .../test_quantization_configurations.py       |  7 ++++---
 ...t_symmetric_threshold_selection_weights.py |  3 ++-
 .../test_uniform_range_selection_weights.py   |  3 ++-
 tests/keras_tests/tpc_keras.py                |  5 +++--
 .../test_quantization_configurations.py       |  7 ++++---
 .../model_tests/feature_models/qat_test.py    |  7 ++++---
 .../model_tests/test_feature_models_runner.py | 19 +++++++++--------
 18 files changed, 67 insertions(+), 52 deletions(-)

diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py
index 93bc46c1e..75a513fcf 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/qnnpack_tpc/v1/tpc.py
@@ -16,6 +16,7 @@
 
 import model_compression_toolkit as mct
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, QNNPACK_TP_MODEL
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, \
diff --git a/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py b/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py
index 7d4a6048f..7cfa6581a 100644
--- a/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py
+++ b/model_compression_toolkit/target_platform_capabilities/tpc_models/tflite_tpc/v1/tpc.py
@@ -16,6 +16,7 @@
 
 import model_compression_toolkit as mct
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import BIAS_ATTR, KERNEL_ATTR, TFLITE_TP_MODEL
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities, Signedness, \
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v2/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v2/tpc.py
index f2dcfb4d0..0fce8d182 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v2/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v2/tpc.py
@@ -16,6 +16,7 @@
 
 import model_compression_toolkit as mct
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
     IMX500_TP_MODEL
diff --git a/tests/common_tests/helpers/tpcs_for_tests/v3/tpc.py b/tests/common_tests/helpers/tpcs_for_tests/v3/tpc.py
index f49cbfd94..f3f0f7045 100644
--- a/tests/common_tests/helpers/tpcs_for_tests/v3/tpc.py
+++ b/tests/common_tests/helpers/tpcs_for_tests/v3/tpc.py
@@ -16,6 +16,7 @@
 
 import model_compression_toolkit as mct
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.constants import FLOAT_BITWIDTH
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, BIAS_ATTR, WEIGHTS_N_BITS, \
     IMX500_TP_MODEL
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/const_quantization_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/const_quantization_test.py
index 24e9e85e9..4849c7a40 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/const_quantization_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/const_quantization_test.py
@@ -27,8 +27,7 @@
 from tests.keras_tests.feature_networks_tests.base_keras_feature_test import BaseKerasFeatureNetworkTest
 from tests.common_tests.helpers.tensors_compare import cosine_similarity
 from tests.keras_tests.utils import get_layers_from_model_by_type
-from mct_quantizers import KerasQuantizationWrapper
-
+from mct_quantizers import KerasQuantizationWrapper, QuantizationMethod
 
 keras = tf.keras
 layers = keras.layers
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/lut_quantizer.py b/tests/keras_tests/feature_networks_tests/feature_networks/lut_quantizer.py
index b3d24daa3..c7393e64c 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/lut_quantizer.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/lut_quantizer.py
@@ -19,7 +19,7 @@
 import tensorflow as tf
 
 import model_compression_toolkit as mct
-from mct_quantizers import KerasActivationQuantizationHolder
+from mct_quantizers import KerasActivationQuantizationHolder, QuantizationMethod
 from model_compression_toolkit.core.common.network_editors.actions import EditRule, \
     ChangeCandidatesWeightsQuantizationMethod
 from model_compression_toolkit.core.common.network_editors.node_filters import NodeNameFilter
@@ -68,7 +68,7 @@ def get_debug_config(self):
             network_editor=[EditRule(filter=NodeNameFilter(self.node_to_change_name),
                                      action=ChangeCandidatesWeightsQuantizationMethod(
                                          weights_quantization_method=
-                                         mct.QuantizationMethod.POWER_OF_TWO,
+                                         QuantizationMethod.POWER_OF_TWO,
                                          attr_name=KERNEL))])
 
     def get_input_shapes(self):
diff --git a/tests/keras_tests/feature_networks_tests/feature_networks/qat/qat_test.py b/tests/keras_tests/feature_networks_tests/feature_networks/qat/qat_test.py
index 6fd65e2f9..d9bbb6b8b 100644
--- a/tests/keras_tests/feature_networks_tests/feature_networks/qat/qat_test.py
+++ b/tests/keras_tests/feature_networks_tests/feature_networks/qat/qat_test.py
@@ -19,7 +19,8 @@
 import tensorflow as tf
 
 import model_compression_toolkit as mct
-from mct_quantizers import QuantizationTarget, KerasActivationQuantizationHolder, KerasQuantizationWrapper
+from mct_quantizers import QuantizationTarget, KerasActivationQuantizationHolder, KerasQuantizationWrapper, \
+    QuantizationMethod
 from mct_quantizers.common.base_inferable_quantizer import QuantizerID
 from mct_quantizers.common.get_all_subclasses import get_all_subclasses
 from mct_quantizers.keras.quantizers import BaseKerasInferableQuantizer
@@ -45,8 +46,8 @@
 
 class QuantizationAwareTrainingTest(BaseKerasFeatureNetworkTest):
     def __init__(self, unit_test, layer, weight_bits=2, activation_bits=4, finalize=False,
-                 weights_quantization_method=mct.QuantizationMethod.POWER_OF_TWO,
-                 activation_quantization_method=mct.QuantizationMethod.POWER_OF_TWO,
+                 weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
+                 activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
                  test_loading=False):
         self.layer = layer
         self.weight_bits = weight_bits
@@ -163,8 +164,8 @@ def compare(self, quantized_model, float_model, loaded_model, input_x=None, quan
 
 class QATWrappersTest(BaseKerasFeatureNetworkTest):
     def __init__(self, unit_test, layer, weight_bits=2, activation_bits=4, finalize=True,
-                 weights_quantization_method=mct.QuantizationMethod.POWER_OF_TWO,
-                 activation_quantization_method=mct.QuantizationMethod.POWER_OF_TWO,
+                 weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
+                 activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
                  training_method=TrainingMethod.STE,
                  per_channel=True,
                  test_loading=False):
diff --git a/tests/keras_tests/function_tests/test_custom_layer.py b/tests/keras_tests/function_tests/test_custom_layer.py
index 420d558ee..fd48651a8 100644
--- a/tests/keras_tests/function_tests/test_custom_layer.py
+++ b/tests/keras_tests/function_tests/test_custom_layer.py
@@ -19,6 +19,7 @@
 
 import model_compression_toolkit as mct
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.core import CoreConfig, QuantizationConfig
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import Signedness
 from model_compression_toolkit.target_platform_capabilities.constants import BIAS_ATTR, KERNEL_ATTR
diff --git a/tests/keras_tests/function_tests/test_hmse_error_method.py b/tests/keras_tests/function_tests/test_hmse_error_method.py
index 6e068e575..300a8528b 100644
--- a/tests/keras_tests/function_tests/test_hmse_error_method.py
+++ b/tests/keras_tests/function_tests/test_hmse_error_method.py
@@ -20,6 +20,7 @@
 
 import model_compression_toolkit as mct
 import model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema as schema
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit import DefaultDict
 from model_compression_toolkit.core import QuantizationConfig
 from model_compression_toolkit.constants import THRESHOLD, RANGE_MAX, NUM_QPARAM_HESSIAN_SAMPLES
@@ -136,44 +137,44 @@ def _run_node_verification(node_type):
         _run_node_verification(layers.Dense)
 
     def test_pot_threshold_selection_hmse_per_channel(self):
-        self._setup_with_args(quant_method=mct.QuantizationMethod.POWER_OF_TWO, per_channel=True)
+        self._setup_with_args(quant_method=QuantizationMethod.POWER_OF_TWO, per_channel=True)
         calculate_quantization_params(self.graph, fw_impl=self.keras_impl, repr_data_gen_fn=representative_dataset,
                                       hessian_info_service=self.his, num_hessian_samples=1)
         self._verify_params_calculation_execution(THRESHOLD)
 
     def test_pot_threshold_selection_hmse_per_tensor(self):
-        self._setup_with_args(quant_method=mct.QuantizationMethod.POWER_OF_TWO, per_channel=False)
+        self._setup_with_args(quant_method=QuantizationMethod.POWER_OF_TWO, per_channel=False)
         calculate_quantization_params(self.graph, fw_impl=self.keras_impl, repr_data_gen_fn=representative_dataset,
                                       hessian_info_service=self.his, num_hessian_samples=1)
         self._verify_params_calculation_execution(THRESHOLD)
 
     def test_symmetric_threshold_selection_hmse_per_channel(self):
-        self._setup_with_args(quant_method=mct.QuantizationMethod.SYMMETRIC, per_channel=True)
+        self._setup_with_args(quant_method=QuantizationMethod.SYMMETRIC, per_channel=True)
         calculate_quantization_params(self.graph, fw_impl=self.keras_impl, repr_data_gen_fn=representative_dataset,
                                       hessian_info_service=self.his, num_hessian_samples=1)
         self._verify_params_calculation_execution(THRESHOLD)
 
     def test_symmetric_threshold_selection_hmse_per_tensor(self):
-        self._setup_with_args(quant_method=mct.QuantizationMethod.SYMMETRIC, per_channel=False)
+        self._setup_with_args(quant_method=QuantizationMethod.SYMMETRIC, per_channel=False)
         calculate_quantization_params(self.graph, fw_impl=self.keras_impl, repr_data_gen_fn=representative_dataset,
                                       hessian_info_service=self.his, num_hessian_samples=1)
         self._verify_params_calculation_execution(THRESHOLD)
 
     def test_usniform_threshold_selection_hmse_per_channel(self):
-        self._setup_with_args(quant_method=mct.QuantizationMethod.UNIFORM, per_channel=True)
+        self._setup_with_args(quant_method=QuantizationMethod.UNIFORM, per_channel=True)
         calculate_quantization_params(self.graph, fw_impl=self.keras_impl, repr_data_gen_fn=representative_dataset,
                                       hessian_info_service=self.his, num_hessian_samples=1)
         self._verify_params_calculation_execution(RANGE_MAX)
 
     def test_uniform_threshold_selection_hmse_per_tensor(self):
-        self._setup_with_args(quant_method=mct.QuantizationMethod.UNIFORM, per_channel=False)
+        self._setup_with_args(quant_method=QuantizationMethod.UNIFORM, per_channel=False)
         calculate_quantization_params(self.graph, fw_impl=self.keras_impl, repr_data_gen_fn=representative_dataset,
                                       hessian_info_service=self.his, num_hessian_samples=1)
         self._verify_params_calculation_execution(RANGE_MAX)
 
     def test_threshold_selection_hmse_no_gptq(self):
         with self.assertRaises(ValueError) as e:
-            self._setup_with_args(quant_method=mct.QuantizationMethod.SYMMETRIC, per_channel=True,
+            self._setup_with_args(quant_method=QuantizationMethod.SYMMETRIC, per_channel=True,
                                   running_gptq=False)
         self.assertTrue('The HMSE error method for parameters selection is only supported when running GPTQ '
                         'optimization due to long execution time that is not suitable for basic PTQ.' in
@@ -200,7 +201,7 @@ def _generate_bn_quantization_tpc(quant_method, per_channel):
 
             return tpc
 
-        self._setup_with_args(quant_method=mct.QuantizationMethod.SYMMETRIC, per_channel=True,
+        self._setup_with_args(quant_method=QuantizationMethod.SYMMETRIC, per_channel=True,
                               tpc_fn=_generate_bn_quantization_tpc, model_gen_fn=no_bn_fusion_model_gen)
         calculate_quantization_params(self.graph, fw_impl=self.keras_impl, repr_data_gen_fn=representative_dataset,
                                       hessian_info_service=self.his, num_hessian_samples=1)
diff --git a/tests/keras_tests/function_tests/test_kl_error_quantization_configurations.py b/tests/keras_tests/function_tests/test_kl_error_quantization_configurations.py
index 52d0bbbe3..242f9ebff 100644
--- a/tests/keras_tests/function_tests/test_kl_error_quantization_configurations.py
+++ b/tests/keras_tests/function_tests/test_kl_error_quantization_configurations.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ==============================================================================
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
 import unittest
@@ -38,9 +39,9 @@ def test_run_quantization_config_mbv1(self):
         def representative_data_gen():
             yield [x]
 
-        quantizer_methods = [mct.QuantizationMethod.POWER_OF_TWO,
-                             mct.QuantizationMethod.SYMMETRIC,
-                             mct.QuantizationMethod.UNIFORM]
+        quantizer_methods = [QuantizationMethod.POWER_OF_TWO,
+                             QuantizationMethod.SYMMETRIC,
+                             QuantizationMethod.UNIFORM]
 
         quantization_error_methods = [mct.core.QuantizationErrorMethod.KL]
         relu_bound_to_power_of_2 = [True, False]
diff --git a/tests/keras_tests/function_tests/test_layer_fusing.py b/tests/keras_tests/function_tests/test_layer_fusing.py
index 9da236bd5..9c14045e5 100644
--- a/tests/keras_tests/function_tests/test_layer_fusing.py
+++ b/tests/keras_tests/function_tests/test_layer_fusing.py
@@ -8,6 +8,7 @@
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
 from model_compression_toolkit.core.keras.keras_implementation import KerasImplementation
 from model_compression_toolkit.core.common.quantization.quantization_config import CustomOpsetLayers
+from model_compression_toolkit.target_platform_capabilities.targetplatform2framework import LayerFilterParams
 from model_compression_toolkit.target_platform_capabilities.targetplatform2framework.attach2keras import \
     AttachTpcToKeras
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import \
@@ -199,14 +200,14 @@ def test_layer_fusing_2(self):
 
         qc = QuantizationConfig(custom_tpc_opset_to_layer={"Conv": CustomOpsetLayers([Conv2D]),
                                                            "AnyReLU": CustomOpsetLayers([tf.nn.relu,
-                                                                        tp.LayerFilterParams(ReLU, negative_slope=0.0),
-                                                                        tp.LayerFilterParams(Activation,
+                                                                        LayerFilterParams(ReLU, negative_slope=0.0),
+                                                                        LayerFilterParams(Activation,
                                                                                              activation="relu")]),
-                                                           "Swish": CustomOpsetLayers([tf.nn.swish, tp.LayerFilterParams(Activation,
+                                                           "Swish": CustomOpsetLayers([tf.nn.swish, LayerFilterParams(Activation,
                                                                                                         activation="swish")]),
-                                                           "Sigmoid": CustomOpsetLayers([tf.nn.sigmoid, tp.LayerFilterParams(Activation,
+                                                           "Sigmoid": CustomOpsetLayers([tf.nn.sigmoid, LayerFilterParams(Activation,
                                                                                                             activation="sigmoid")]),
-                                                           "Tanh": CustomOpsetLayers([tf.nn.tanh, tp.LayerFilterParams(Activation,
+                                                           "Tanh": CustomOpsetLayers([tf.nn.tanh, LayerFilterParams(Activation,
                                                                                                       activation="tanh")])})
 
         fusion_graph = prepare_graph_with_configs(model, KerasImplementation(), DEFAULT_KERAS_INFO,
@@ -221,8 +222,8 @@ def test_layer_fusing_3(self):
 
         qc = QuantizationConfig(custom_tpc_opset_to_layer={"Conv": CustomOpsetLayers([Conv2D]),
                                                            "AnyReLU": CustomOpsetLayers([tf.nn.relu,
-                                                                        tp.LayerFilterParams(ReLU, negative_slope=0.0),
-                                                                        tp.LayerFilterParams(Activation,
+                                                                        LayerFilterParams(ReLU, negative_slope=0.0),
+                                                                        LayerFilterParams(Activation,
                                                                                              activation="relu")])})
 
         fusion_graph = prepare_graph_with_configs(model, KerasImplementation(), DEFAULT_KERAS_INFO,
@@ -240,11 +241,11 @@ def test_layer_fusing_4(self):
             "Conv": CustomOpsetLayers([Conv2D]),
             "FullyConnected": CustomOpsetLayers([Dense]),
             "AnyReLU": CustomOpsetLayers([tf.nn.relu,
-                         tp.LayerFilterParams(ReLU, negative_slope=0.0),
-                         tp.LayerFilterParams(Activation,
+                         LayerFilterParams(ReLU, negative_slope=0.0),
+                         LayerFilterParams(Activation,
                                               activation="relu")]),
             "Add": CustomOpsetLayers([tf.add, Add]),
-            "Swish": CustomOpsetLayers([tf.nn.swish, tp.LayerFilterParams(Activation, activation="swish")]),
+            "Swish": CustomOpsetLayers([tf.nn.swish, LayerFilterParams(Activation, activation="swish")]),
         })
 
         fusion_graph = prepare_graph_with_configs(model, KerasImplementation(), DEFAULT_KERAS_INFO,
diff --git a/tests/keras_tests/function_tests/test_quantization_configurations.py b/tests/keras_tests/function_tests/test_quantization_configurations.py
index bbd7fccc4..29a4169e0 100644
--- a/tests/keras_tests/function_tests/test_quantization_configurations.py
+++ b/tests/keras_tests/function_tests/test_quantization_configurations.py
@@ -21,6 +21,7 @@
 from tensorflow.keras import layers
 
 import model_compression_toolkit as mct
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_keras_tpc
 from model_compression_toolkit.core.keras.default_framework_info import DEFAULT_KERAS_INFO
 from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
@@ -41,9 +42,9 @@ def test_run_quantization_config(self):
         def representative_data_gen():
             yield [x]
 
-        quantizer_methods = [mct.QuantizationMethod.POWER_OF_TWO,
-                             mct.QuantizationMethod.SYMMETRIC,
-                             mct.QuantizationMethod.UNIFORM]
+        quantizer_methods = [QuantizationMethod.POWER_OF_TWO,
+                             QuantizationMethod.SYMMETRIC,
+                             QuantizationMethod.UNIFORM]
 
         quantization_error_methods = [mct.core.QuantizationErrorMethod.MSE,
                                       mct.core.QuantizationErrorMethod.NOCLIPPING,
diff --git a/tests/keras_tests/function_tests/test_symmetric_threshold_selection_weights.py b/tests/keras_tests/function_tests/test_symmetric_threshold_selection_weights.py
index c063c6188..86b55c912 100644
--- a/tests/keras_tests/function_tests/test_symmetric_threshold_selection_weights.py
+++ b/tests/keras_tests/function_tests/test_symmetric_threshold_selection_weights.py
@@ -19,6 +19,7 @@
 from keras.layers import Conv2D, Conv2DTranspose
 
 import model_compression_toolkit as mct
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.core import QuantizationConfig, QuantizationErrorMethod
 from model_compression_toolkit.constants import THRESHOLD
 from model_compression_toolkit.core.keras.constants import KERNEL
@@ -58,7 +59,7 @@ def representative_dataset():
 
 def get_tpc(per_channel):
     tp = generate_test_tpc(edit_params_dict={
-        'weights_quantization_method': mct.QuantizationMethod.SYMMETRIC,
+        'weights_quantization_method': QuantizationMethod.SYMMETRIC,
         'weights_per_channel_threshold': per_channel})
     tpc = generate_keras_tpc(name="symmetric_threshold_selection_test", tpc=tp)
 
diff --git a/tests/keras_tests/function_tests/test_uniform_range_selection_weights.py b/tests/keras_tests/function_tests/test_uniform_range_selection_weights.py
index 57599b0e7..f4d51b014 100644
--- a/tests/keras_tests/function_tests/test_uniform_range_selection_weights.py
+++ b/tests/keras_tests/function_tests/test_uniform_range_selection_weights.py
@@ -19,6 +19,7 @@
 from keras.layers import Conv2D, Conv2DTranspose
 
 import model_compression_toolkit as mct
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.core import QuantizationConfig, QuantizationErrorMethod
 from model_compression_toolkit.constants import RANGE_MIN, RANGE_MAX
 from model_compression_toolkit.core.keras.constants import KERNEL
@@ -57,7 +58,7 @@ def representative_dataset():
 
 def get_tpc(per_channel):
     tp = generate_test_tpc({
-        'weights_quantization_method': mct.QuantizationMethod.UNIFORM,
+        'weights_quantization_method': QuantizationMethod.UNIFORM,
         'weights_per_channel_threshold': per_channel})
     tpc = generate_keras_tpc(name="uniform_range_selection_test", tpc=tp)
 
diff --git a/tests/keras_tests/tpc_keras.py b/tests/keras_tests/tpc_keras.py
index ed0f382b9..b77d1a1c3 100644
--- a/tests/keras_tests/tpc_keras.py
+++ b/tests/keras_tests/tpc_keras.py
@@ -15,6 +15,7 @@
 from packaging import version
 import tensorflow as tf
 
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.defaultdict import DefaultDict
 from model_compression_toolkit.target_platform_capabilities.constants import KERNEL_ATTR, KERAS_KERNEL, BIAS_ATTR, BIAS, \
     KERAS_DEPTHWISE_KERNEL
@@ -35,8 +36,8 @@
 
 
 def get_tpc(name, weight_bits=8, activation_bits=8,
-            weights_quantization_method=mct.QuantizationMethod.POWER_OF_TWO,
-            activation_quantization_method=mct.QuantizationMethod.POWER_OF_TWO,
+            weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
+            activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
             per_channel=True):
     tpc = generate_test_tpc({'weights_n_bits': weight_bits,
                                        'activation_n_bits': activation_bits,
diff --git a/tests/pytorch_tests/function_tests/test_quantization_configurations.py b/tests/pytorch_tests/function_tests/test_quantization_configurations.py
index c43c5c67f..fc50a03af 100644
--- a/tests/pytorch_tests/function_tests/test_quantization_configurations.py
+++ b/tests/pytorch_tests/function_tests/test_quantization_configurations.py
@@ -20,6 +20,7 @@
 import torch.nn
 
 import model_compression_toolkit as mct
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.target_platform_capabilities.tpc_models.imx500_tpc.latest import generate_pytorch_tpc
 from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 import torch
@@ -50,9 +51,9 @@ def test_run_quantization_config(self):
         def representative_data_gen():
             yield [x]
 
-        quantizer_methods = [mct.QuantizationMethod.POWER_OF_TWO,
-                             mct.QuantizationMethod.SYMMETRIC,
-                             mct.QuantizationMethod.UNIFORM]
+        quantizer_methods = [QuantizationMethod.POWER_OF_TWO,
+                             QuantizationMethod.SYMMETRIC,
+                             QuantizationMethod.UNIFORM]
 
         quantization_error_methods = [mct.core.QuantizationErrorMethod.MSE,
                                       mct.core.QuantizationErrorMethod.NOCLIPPING,
diff --git a/tests/pytorch_tests/model_tests/feature_models/qat_test.py b/tests/pytorch_tests/model_tests/feature_models/qat_test.py
index 43c5f3b37..73154073c 100644
--- a/tests/pytorch_tests/model_tests/feature_models/qat_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/qat_test.py
@@ -23,7 +23,8 @@
 from torch import Tensor
 
 import model_compression_toolkit as mct
-from mct_quantizers import PytorchActivationQuantizationHolder, QuantizationTarget, PytorchQuantizationWrapper
+from mct_quantizers import PytorchActivationQuantizationHolder, QuantizationTarget, PytorchQuantizationWrapper, \
+    QuantizationMethod
 from mct_quantizers.common.base_inferable_quantizer import QuantizerID
 from mct_quantizers.common.get_all_subclasses import get_all_subclasses
 from mct_quantizers.pytorch.quantizers import BasePyTorchInferableQuantizer
@@ -94,8 +95,8 @@ def repr_datagen():
 
 class QuantizationAwareTrainingTest(BasePytorchFeatureNetworkTest):
     def __init__(self, unit_test, weight_bits=2, activation_bits=4,
-                 weights_quantization_method=mct.QuantizationMethod.POWER_OF_TWO,
-                 activation_quantization_method=mct.QuantizationMethod.POWER_OF_TWO,
+                 weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
+                 activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
                  training_method=TrainingMethod.STE,
                  finalize=False, test_loading=False):
 
diff --git a/tests/pytorch_tests/model_tests/test_feature_models_runner.py b/tests/pytorch_tests/model_tests/test_feature_models_runner.py
index c314e5048..79bcbd3dc 100644
--- a/tests/pytorch_tests/model_tests/test_feature_models_runner.py
+++ b/tests/pytorch_tests/model_tests/test_feature_models_runner.py
@@ -20,6 +20,7 @@
 from torch import nn
 
 import model_compression_toolkit as mct
+from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.core.common.mixed_precision.distance_weighting import MpDistanceWeighting
 from model_compression_toolkit.core.common.network_editors import NodeTypeFilter, NodeNameFilter
 from model_compression_toolkit.core.pytorch.pytorch_device_config import get_working_device
@@ -415,7 +416,7 @@ def test_lut_weights_quantizer(self):
         values.
         """
         LUTWeightsQuantizerTest(self).run_test()
-        LUTWeightsQuantizerTest(self, quant_method=mct.QuantizationMethod.LUT_SYM_QUANTIZER).run_test()
+        LUTWeightsQuantizerTest(self, quant_method=QuantizationMethod.LUT_SYM_QUANTIZER).run_test()
 
     def test_lut_activation_quantizer(self):
         """
@@ -731,7 +732,7 @@ def test_qat(self):
         """
         QuantizationAwareTrainingTest(self).run_test()
         QuantizationAwareTrainingTest(self, finalize=True).run_test()
-        _method = mct.QuantizationMethod.SYMMETRIC
+        _method = QuantizationMethod.SYMMETRIC
         QuantizationAwareTrainingTest(self,
                                       weights_quantization_method=_method,
                                       activation_quantization_method=_method
@@ -740,7 +741,7 @@ def test_qat(self):
                                       weights_quantization_method=_method,
                                       activation_quantization_method=_method,
                                       finalize=True).run_test()
-        _method = mct.QuantizationMethod.UNIFORM
+        _method = QuantizationMethod.UNIFORM
         QuantizationAwareTrainingTest(self,
                                       weights_quantization_method=_method,
                                       activation_quantization_method=_method
@@ -750,18 +751,18 @@ def test_qat(self):
                                       activation_quantization_method=_method,
                                       finalize=True).run_test()
         QuantizationAwareTrainingTest(self,
-                                      weights_quantization_method=mct.QuantizationMethod.SYMMETRIC,
-                                      activation_quantization_method=mct.QuantizationMethod.SYMMETRIC,
+                                      weights_quantization_method=QuantizationMethod.SYMMETRIC,
+                                      activation_quantization_method=QuantizationMethod.SYMMETRIC,
                                       training_method=TrainingMethod.LSQ,
                                       finalize=True).run_test()
         QuantizationAwareTrainingTest(self,
-                                      weights_quantization_method=mct.QuantizationMethod.UNIFORM,
-                                      activation_quantization_method=mct.QuantizationMethod.UNIFORM,
+                                      weights_quantization_method=QuantizationMethod.UNIFORM,
+                                      activation_quantization_method=QuantizationMethod.UNIFORM,
                                       training_method=TrainingMethod.LSQ,
                                       finalize=True).run_test()
         QuantizationAwareTrainingTest(self,
-                                      weights_quantization_method=mct.QuantizationMethod.POWER_OF_TWO,
-                                      activation_quantization_method=mct.QuantizationMethod.POWER_OF_TWO,
+                                      weights_quantization_method=QuantizationMethod.POWER_OF_TWO,
+                                      activation_quantization_method=QuantizationMethod.POWER_OF_TWO,
                                       training_method=TrainingMethod.LSQ,
                                       finalize=True).run_test()
         QuantizationAwareTrainingQuantizerHolderTest(self).run_test()

From 52e0a9d2369dc64dfb16621fd89488bbaa36386f Mon Sep 17 00:00:00 2001
From: Ofir Gordon <ofirgo@sony.com>
Date: Mon, 13 Jan 2025 17:50:36 +0200
Subject: [PATCH 15/18] fix docsrc and missing commit

---
 docsrc/source/api/api_docs/index.rst          |  4 +--
 .../api/api_docs/modules/layer_filters.rst    | 14 +++++-----
 .../api/api_docs/modules/target_platform.rst  | 28 +++++++++----------
 .../modules/trainable_infrastructure.rst      |  4 +--
 docsrc/source/api/api_docs/notes/tpc_note.rst |  4 +--
 .../model_tests/test_feature_models_runner.py |  8 +++---
 6 files changed, 31 insertions(+), 31 deletions(-)

diff --git a/docsrc/source/api/api_docs/index.rst b/docsrc/source/api/api_docs/index.rst
index 1e3468fb4..d2f9119e2 100644
--- a/docsrc/source/api/api_docs/index.rst
+++ b/docsrc/source/api/api_docs/index.rst
@@ -106,9 +106,9 @@ keras_load_quantized_model
 - :ref:`keras_load_quantized_model<ug-keras_load_quantized_model>`: A function to load a quantized keras model.
 
 
-target_platform
+target_platform_capabilities
 ================
-- :ref:`target_platform<ug-target_platform>`: Module to create and model hardware-related settings to optimize the model according to, by the hardware the optimized model will use during inference.
+- :ref:`target_platform_capabilities<ug-target_platform_capabilities>`: Module to create and model hardware-related settings to optimize the model according to, by the hardware the optimized model will use during inference.
 - :ref:`get_target_platform_capabilities<ug-get_target_platform_capabilities>`: A function to get a target platform model for Tensorflow and Pytorch.
 - :ref:`DefaultDict<ug-DefaultDict>`: Util class for creating a FrameworkQuantizationCapabilities.
 
diff --git a/docsrc/source/api/api_docs/modules/layer_filters.rst b/docsrc/source/api/api_docs/modules/layer_filters.rst
index 2279e54b6..f21836e08 100644
--- a/docsrc/source/api/api_docs/modules/layer_filters.rst
+++ b/docsrc/source/api/api_docs/modules/layer_filters.rst
@@ -15,30 +15,30 @@ one may use the next filters to check if a layer configuration holds the created
 Attribute Filters
 ==================
 
-.. autoclass:: model_compression_toolkit.target_platform.AttributeFilter
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.AttributeFilter
 
 |
 
-.. autoclass:: model_compression_toolkit.target_platform.Eq
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.Eq
 
 |
 
-.. autoclass:: model_compression_toolkit.target_platform.NotEq
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.NotEq
 
 |
 
-.. autoclass:: model_compression_toolkit.target_platform.Greater
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.Greater
 
 |
 
 
-.. autoclass:: model_compression_toolkit.target_platform.GreaterEq
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.GreaterEq
 
 |
 
 
-.. autoclass:: model_compression_toolkit.target_platform.Smaller
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.Smaller
 
 |
 
-.. autoclass:: model_compression_toolkit.target_platform.SmallerEq
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.SmallerEq
diff --git a/docsrc/source/api/api_docs/modules/target_platform.rst b/docsrc/source/api/api_docs/modules/target_platform.rst
index 4fe74dfb4..ed1995a5c 100644
--- a/docsrc/source/api/api_docs/modules/target_platform.rst
+++ b/docsrc/source/api/api_docs/modules/target_platform.rst
@@ -1,10 +1,10 @@
 :orphan:
 
-.. _ug-target_platform:
+.. _ug-target_platform_capabilities:
 
 
 =================================
-target_platform Module
+target_platform_capabilities Module
 =================================
 
 MCT can be configured to quantize and optimize models for different hardware settings.
@@ -14,7 +14,7 @@ uses `per-tensor weights quantization <https://github.com/pytorch/pytorch/blob/m
 for Conv2d, while when using tflite modeling, Tensorflow uses `per-channel weights quantization for
 Conv2D <https://www.tensorflow.org/lite/performance/quantization_spec#per-axis_vs_per-tensor>`_.
 
-This can be addressed in MCT by using the target_platform module, that can configure different
+This can be addressed in MCT by using the target_platform_capabilities module, that can configure different
 parameters that are hardware-related, and the optimization process will use this to optimize the model accordingly.
 Models for IMX500, TFLite and qnnpack can be observed `here <https://github.com/sony/model_optimization/tree/main/model_compression_toolkit/target_platform_capabilities>`_, and can be used using :ref:`get_target_platform_capabilities function<ug-get_target_platform_capabilities>`.
 
@@ -36,61 +36,61 @@ Now, we will detail about the different components.
 
 QuantizationMethod
 ==========================
-.. autoclass:: model_compression_toolkit.target_platform.QuantizationMethod
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.QuantizationMethod
 
 
 
 OpQuantizationConfig
 ======================
-.. autoclass:: model_compression_toolkit.target_platform.OpQuantizationConfig
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.OpQuantizationConfig
 
 
 
 AttributeQuantizationConfig
 ============================
-.. autoclass:: model_compression_toolkit.target_platform.AttributeQuantizationConfig
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.AttributeQuantizationConfig
 
 
 QuantizationConfigOptions
 ============================
-.. autoclass:: model_compression_toolkit.target_platform.QuantizationConfigOptions
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.QuantizationConfigOptions
 
 
 TargetPlatformCapabilities
 =======================
-.. autoclass:: model_compression_toolkit.target_platform.TargetPlatformCapabilities
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.TargetPlatformCapabilities
 
 
 OperatorsSet
 ================
-.. autoclass:: model_compression_toolkit.target_platform.OperatorsSet
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.OperatorsSet
 
 
 
 Fusing
 ==============
-.. autoclass:: model_compression_toolkit.target_platform.Fusing
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.Fusing
 
 
 
 OperatorSetGroup
 ====================
-.. autoclass:: model_compression_toolkit.target_platform.OperatorSetGroup
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.OperatorSetGroup
 
 
 OperationsToLayers
 =====================
-.. autoclass:: model_compression_toolkit.target_platform.OperationsToLayers
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.OperationsToLayers
 
 
 OperationsSetToLayers
 =========================
-.. autoclass:: model_compression_toolkit.target_platform.OperationsSetToLayers
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.OperationsSetToLayers
 
 
 LayerFilterParams
 =========================
-.. autoclass:: model_compression_toolkit.target_platform.LayerFilterParams
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.LayerFilterParams
 
 More filters and usage examples are detailed :ref:`here<ug-layer_filters>`.
 
diff --git a/docsrc/source/api/api_docs/modules/trainable_infrastructure.rst b/docsrc/source/api/api_docs/modules/trainable_infrastructure.rst
index 5e67d6c34..0efbd5d06 100644
--- a/docsrc/source/api/api_docs/modules/trainable_infrastructure.rst
+++ b/docsrc/source/api/api_docs/modules/trainable_infrastructure.rst
@@ -55,7 +55,7 @@ For example, we can set a trainable weights quantizer with the following configu
 
 .. code-block:: python
 
-    from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+    from model_compression_toolkit.target_platform_capabilities import QuantizationMethod
     from model_compression_toolkit.constants import THRESHOLD, MIN_THRESHOLD
 
     TrainableQuantizerWeightsConfig(weights_quantization_method=QuantizationMethod.SYMMETRIC,
@@ -79,7 +79,7 @@ For example, we can set a trainable activation quantizer with the following conf
 
 .. code-block:: python
 
-    from model_compression_toolkit.target_platform_capabilities.target_platform import QuantizationMethod
+    from model_compression_toolkit.target_platform_capabilities import QuantizationMethod
     from model_compression_toolkit.constants import THRESHOLD, MIN_THRESHOLD
 
     TrainableQuantizerActivationConfig(activation_quantization_method=QuantizationMethod.UNIFORM,
diff --git a/docsrc/source/api/api_docs/notes/tpc_note.rst b/docsrc/source/api/api_docs/notes/tpc_note.rst
index 39558f42a..72a1d8298 100644
--- a/docsrc/source/api/api_docs/notes/tpc_note.rst
+++ b/docsrc/source/api/api_docs/notes/tpc_note.rst
@@ -1,7 +1,7 @@
 
 .. note::
-   For now, some fields of :class:`~model_compression_toolkit.target_platform.OpQuantizationConfig` are ignored during
+   For now, some fields of :class:`model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema.OpQuantizationConfig` are ignored during
    the optimization process such as quantization_preserving, fixed_scale, and fixed_zero_point.
 
-   - MCT will use more information from :class:`~model_compression_toolkit.target_platform.OpQuantizationConfig`, in the future.
+   - MCT will use more information from :class:`~model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema.OpQuantizationConfig`, in the future.
 
diff --git a/tests/pytorch_tests/model_tests/test_feature_models_runner.py b/tests/pytorch_tests/model_tests/test_feature_models_runner.py
index 79bcbd3dc..0c9c1c563 100644
--- a/tests/pytorch_tests/model_tests/test_feature_models_runner.py
+++ b/tests/pytorch_tests/model_tests/test_feature_models_runner.py
@@ -695,15 +695,15 @@ def test_gptq(self):
         GPTQWeightsUpdateTest(self, rounding_type=RoundingType.SoftQuantizer).run_test()
         GPTQLearnRateZeroTest(self, rounding_type=RoundingType.SoftQuantizer).run_test()
         GPTQAccuracyTest(self, rounding_type=RoundingType.SoftQuantizer,
-                         weights_quant_method=QuantizationMethod.UNIFORM).run_test()
+                         weights_quant_method=mct.QuantizationMethod.UNIFORM).run_test()
         GPTQAccuracyTest(self, rounding_type=RoundingType.SoftQuantizer,
-                         weights_quant_method=QuantizationMethod.UNIFORM, per_channel=False,
+                         weights_quant_method=mct.QuantizationMethod.UNIFORM, per_channel=False,
                          params_learning=False).run_test()
         GPTQAccuracyTest(self, rounding_type=RoundingType.SoftQuantizer,
-                         weights_quant_method=QuantizationMethod.UNIFORM,
+                         weights_quant_method=mct.QuantizationMethod.UNIFORM,
                          per_channel=True, hessian_weights=True, log_norm_weights=True, scaled_log_norm=True).run_test()
         GPTQWeightsUpdateTest(self, rounding_type=RoundingType.SoftQuantizer,
-                              weights_quant_method=QuantizationMethod.UNIFORM,
+                              weights_quant_method=mct.QuantizationMethod.UNIFORM,
                               params_learning=False).run_test()  # TODO: When params learning is True, the uniform quantizer gets a min value  > max value
 
     def test_gptq_with_gradual_activation(self):

From 9b0fbb9e7cf034309a5d3de2956cf5dbdfe904fc Mon Sep 17 00:00:00 2001
From: Ofir Gordon <ofirgo@sony.com>
Date: Mon, 13 Jan 2025 20:57:24 +0200
Subject: [PATCH 16/18] fix pytorch test

---
 .../feature_models/multi_head_attention_test.py     | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/tests/pytorch_tests/model_tests/feature_models/multi_head_attention_test.py b/tests/pytorch_tests/model_tests/feature_models/multi_head_attention_test.py
index 545f36b19..ce98b4d9f 100644
--- a/tests/pytorch_tests/model_tests/feature_models/multi_head_attention_test.py
+++ b/tests/pytorch_tests/model_tests/feature_models/multi_head_attention_test.py
@@ -20,8 +20,7 @@
 
 import model_compression_toolkit as mct
 from model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema import TargetPlatformCapabilities
-from model_compression_toolkit.core.pytorch.default_framework_info import DEFAULT_PYTORCH_INFO
-from tests.common_tests.helpers.generate_test_tp_model import generate_test_tp_model
+from tests.common_tests.helpers.generate_test_tpc import generate_test_tpc
 from tests.pytorch_tests.model_tests.base_pytorch_test import BasePytorchTest
 
 """
@@ -57,7 +56,7 @@ def create_inputs_shape(self):
 
     def get_tpc(self):
         tpc = {
-            'no_quantization': generate_test_tp_model({
+            'no_quantization': generate_test_tpc({
                 'weights_n_bits': 32,
                 'activation_n_bits': 32,
                 'enable_weights_quantization': False,
@@ -65,10 +64,10 @@ def get_tpc(self):
             })
         }
         if self.num_heads < 5:
-            tpc['all_4bit'] = generate_test_tp_model({'weights_n_bits': 4,
-                                                      'activation_n_bits': 4,
-                                                      'enable_weights_quantization': True,
-                                                      'enable_activation_quantization': True})
+            tpc['all_4bit'] = generate_test_tpc({'weights_n_bits': 4,
+                                                 'activation_n_bits': 4,
+                                                 'enable_weights_quantization': True,
+                                                 'enable_activation_quantization': True})
         return tpc
 
 

From 36014bf1d9c3123703a78786e46a4fb04c693b64 Mon Sep 17 00:00:00 2001
From: Ofir Gordon <ofirgo@sony.com>
Date: Mon, 13 Jan 2025 21:46:11 +0200
Subject: [PATCH 17/18] fix docsrc

---
 docsrc/source/api/api_docs/index.rst          |  4 +-
 ...m.rst => target_platform_capabilities.rst} | 47 +++++--------------
 .../modules/trainable_infrastructure.rst      |  4 +-
 docsrc/source/api/api_docs/notes/tpc_note.rst |  4 +-
 4 files changed, 17 insertions(+), 42 deletions(-)
 rename docsrc/source/api/api_docs/modules/{target_platform.rst => target_platform_capabilities.rst} (69%)

diff --git a/docsrc/source/api/api_docs/index.rst b/docsrc/source/api/api_docs/index.rst
index d2f9119e2..cd78a4b5c 100644
--- a/docsrc/source/api/api_docs/index.rst
+++ b/docsrc/source/api/api_docs/index.rst
@@ -107,10 +107,10 @@ keras_load_quantized_model
 
 
 target_platform_capabilities
-================
+==============================
 - :ref:`target_platform_capabilities<ug-target_platform_capabilities>`: Module to create and model hardware-related settings to optimize the model according to, by the hardware the optimized model will use during inference.
 - :ref:`get_target_platform_capabilities<ug-get_target_platform_capabilities>`: A function to get a target platform model for Tensorflow and Pytorch.
-- :ref:`DefaultDict<ug-DefaultDict>`: Util class for creating a FrameworkQuantizationCapabilities.
+- :ref:`DefaultDict<ug-DefaultDict>`: Util class for creating a TargetPlatformCapabilities.
 
 
 Indices and tables
diff --git a/docsrc/source/api/api_docs/modules/target_platform.rst b/docsrc/source/api/api_docs/modules/target_platform_capabilities.rst
similarity index 69%
rename from docsrc/source/api/api_docs/modules/target_platform.rst
rename to docsrc/source/api/api_docs/modules/target_platform_capabilities.rst
index ed1995a5c..bbd025ff7 100644
--- a/docsrc/source/api/api_docs/modules/target_platform.rst
+++ b/docsrc/source/api/api_docs/modules/target_platform_capabilities.rst
@@ -3,9 +3,9 @@
 .. _ug-target_platform_capabilities:
 
 
-=================================
+=====================================
 target_platform_capabilities Module
-=================================
+=====================================
 
 MCT can be configured to quantize and optimize models for different hardware settings.
 For example, when using qnnpack backend for Pytorch model inference, Pytorch `quantization
@@ -24,7 +24,7 @@ Models for IMX500, TFLite and qnnpack can be observed `here <https://github.com/
 
 |
 
-The object MCT should get called FrameworkQuantizationCapabilities (or shortly TPC).
+The object MCT should get called TargetPlatformCapabilities (or shortly TPC).
 This diagram demonstrates the main components:
 
 .. image:: ../../../../images/tpc.jpg
@@ -42,62 +42,37 @@ QuantizationMethod
 
 OpQuantizationConfig
 ======================
-.. autoclass:: model_compression_toolkit.target_platform_capabilities.OpQuantizationConfig
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema.OpQuantizationConfig
 
 
 
 AttributeQuantizationConfig
 ============================
-.. autoclass:: model_compression_toolkit.target_platform_capabilities.AttributeQuantizationConfig
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema.AttributeQuantizationConfig
 
 
 QuantizationConfigOptions
 ============================
-.. autoclass:: model_compression_toolkit.target_platform_capabilities.QuantizationConfigOptions
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema.QuantizationConfigOptions
 
 
 TargetPlatformCapabilities
-=======================
-.. autoclass:: model_compression_toolkit.target_platform_capabilities.TargetPlatformCapabilities
+============================
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema.TargetPlatformCapabilities
 
 
 OperatorsSet
 ================
-.. autoclass:: model_compression_toolkit.target_platform_capabilities.OperatorsSet
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema.OperatorsSet
 
 
 
 Fusing
 ==============
-.. autoclass:: model_compression_toolkit.target_platform_capabilities.Fusing
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema.Fusing
 
 
 
 OperatorSetGroup
 ====================
-.. autoclass:: model_compression_toolkit.target_platform_capabilities.OperatorSetGroup
-
-
-OperationsToLayers
-=====================
-.. autoclass:: model_compression_toolkit.target_platform_capabilities.OperationsToLayers
-
-
-OperationsSetToLayers
-=========================
-.. autoclass:: model_compression_toolkit.target_platform_capabilities.OperationsSetToLayers
-
-
-LayerFilterParams
-=========================
-.. autoclass:: model_compression_toolkit.target_platform_capabilities.LayerFilterParams
-
-More filters and usage examples are detailed :ref:`here<ug-layer_filters>`.
-
-
-FrameworkQuantizationCapabilities
-=============================
-.. autoclass:: model_compression_toolkit.target_platform.FrameworkQuantizationCapabilities
-
-
-
+.. autoclass:: model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema.OperatorSetGroup
diff --git a/docsrc/source/api/api_docs/modules/trainable_infrastructure.rst b/docsrc/source/api/api_docs/modules/trainable_infrastructure.rst
index 0efbd5d06..42541f22f 100644
--- a/docsrc/source/api/api_docs/modules/trainable_infrastructure.rst
+++ b/docsrc/source/api/api_docs/modules/trainable_infrastructure.rst
@@ -55,7 +55,7 @@ For example, we can set a trainable weights quantizer with the following configu
 
 .. code-block:: python
 
-    from model_compression_toolkit.target_platform_capabilities import QuantizationMethod
+    from model_compression_toolkit.target_platform_capabilities.target_platform_capabilities import QuantizationMethod
     from model_compression_toolkit.constants import THRESHOLD, MIN_THRESHOLD
 
     TrainableQuantizerWeightsConfig(weights_quantization_method=QuantizationMethod.SYMMETRIC,
@@ -79,7 +79,7 @@ For example, we can set a trainable activation quantizer with the following conf
 
 .. code-block:: python
 
-    from model_compression_toolkit.target_platform_capabilities import QuantizationMethod
+    from model_compression_toolkit.target_platform_capabilities.target_platform_capabilities import QuantizationMethod
     from model_compression_toolkit.constants import THRESHOLD, MIN_THRESHOLD
 
     TrainableQuantizerActivationConfig(activation_quantization_method=QuantizationMethod.UNIFORM,
diff --git a/docsrc/source/api/api_docs/notes/tpc_note.rst b/docsrc/source/api/api_docs/notes/tpc_note.rst
index 72a1d8298..7ced4a5d6 100644
--- a/docsrc/source/api/api_docs/notes/tpc_note.rst
+++ b/docsrc/source/api/api_docs/notes/tpc_note.rst
@@ -1,7 +1,7 @@
 
 .. note::
-   For now, some fields of :class:`model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema.OpQuantizationConfig` are ignored during
+   For now, some fields of :class:`~model_compression_toolkit.target_platform_capabilities.OpQuantizationConfig` are ignored during
    the optimization process such as quantization_preserving, fixed_scale, and fixed_zero_point.
 
-   - MCT will use more information from :class:`~model_compression_toolkit.target_platform_capabilities.schema.mct_current_schema.OpQuantizationConfig`, in the future.
+   - MCT will use more information from :class:`~model_compression_toolkit.target_platform_capabilities.OpQuantizationConfig`, in the future.
 

From 3e2a701a6833aac3d4dadd05308c9b2f5345ab09 Mon Sep 17 00:00:00 2001
From: Ofir Gordon <ofirgo@sony.com>
Date: Mon, 13 Jan 2025 21:50:30 +0200
Subject: [PATCH 18/18] fix QuantizationMethod import in tests

---
 .../model_tests/test_feature_models_runner.py            | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/tests/pytorch_tests/model_tests/test_feature_models_runner.py b/tests/pytorch_tests/model_tests/test_feature_models_runner.py
index 0c9c1c563..208818613 100644
--- a/tests/pytorch_tests/model_tests/test_feature_models_runner.py
+++ b/tests/pytorch_tests/model_tests/test_feature_models_runner.py
@@ -19,7 +19,6 @@
 import torch
 from torch import nn
 
-import model_compression_toolkit as mct
 from mct_quantizers import QuantizationMethod
 from model_compression_toolkit.core.common.mixed_precision.distance_weighting import MpDistanceWeighting
 from model_compression_toolkit.core.common.network_editors import NodeTypeFilter, NodeNameFilter
@@ -695,15 +694,15 @@ def test_gptq(self):
         GPTQWeightsUpdateTest(self, rounding_type=RoundingType.SoftQuantizer).run_test()
         GPTQLearnRateZeroTest(self, rounding_type=RoundingType.SoftQuantizer).run_test()
         GPTQAccuracyTest(self, rounding_type=RoundingType.SoftQuantizer,
-                         weights_quant_method=mct.QuantizationMethod.UNIFORM).run_test()
+                         weights_quant_method=QuantizationMethod.UNIFORM).run_test()
         GPTQAccuracyTest(self, rounding_type=RoundingType.SoftQuantizer,
-                         weights_quant_method=mct.QuantizationMethod.UNIFORM, per_channel=False,
+                         weights_quant_method=QuantizationMethod.UNIFORM, per_channel=False,
                          params_learning=False).run_test()
         GPTQAccuracyTest(self, rounding_type=RoundingType.SoftQuantizer,
-                         weights_quant_method=mct.QuantizationMethod.UNIFORM,
+                         weights_quant_method=QuantizationMethod.UNIFORM,
                          per_channel=True, hessian_weights=True, log_norm_weights=True, scaled_log_norm=True).run_test()
         GPTQWeightsUpdateTest(self, rounding_type=RoundingType.SoftQuantizer,
-                              weights_quant_method=mct.QuantizationMethod.UNIFORM,
+                              weights_quant_method=QuantizationMethod.UNIFORM,
                               params_learning=False).run_test()  # TODO: When params learning is True, the uniform quantizer gets a min value  > max value
 
     def test_gptq_with_gradual_activation(self):