diff --git a/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py b/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py
index abd039ba5..0a9410204 100644
--- a/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py
+++ b/model_compression_toolkit/core/common/graph/memory_graph/bipartite_graph.py
@@ -75,9 +75,7 @@ def _verify_edges(self, edges_list: List[Tuple[Any, Any]]):
             edges_list: A list of edges to verify their correctness.
         """
         for n1, n2 in edges_list:
-            if n1 in self.a_nodes and n2 in self.a_nodes:
-                Logger.critical(f"Attempted to add edge {(n1, n2)} between nodes of the same partition in a bipartite graph, violating bipartite properties.")
-            if n1 in self.b_nodes and n2 in self.b_nodes:
+            if (n1 in self.a_nodes and n2 in self.a_nodes) or (n1 in self.b_nodes and n2 in self.b_nodes):
                 Logger.critical(f"Attempted to add edge {(n1, n2)} between nodes of the same partition in a bipartite graph, violating bipartite properties.")

     def add_nodes_to_a(self, new_nodes: List[Any]):
diff --git a/model_compression_toolkit/core/common/hessian/trace_hessian_calculator.py b/model_compression_toolkit/core/common/hessian/trace_hessian_calculator.py
index 892be116a..539f4b14e 100644
--- a/model_compression_toolkit/core/common/hessian/trace_hessian_calculator.py
+++ b/model_compression_toolkit/core/common/hessian/trace_hessian_calculator.py
@@ -50,14 +50,14 @@ def __init__(self,
         for output_node in graph.get_outputs():
             if not fw_impl.is_output_node_compatible_for_hessian_score_computation(output_node.node):
-                Logger.critical(f"All graph outputs must support Hessian score computation. Incompatible node: {output_node.node}, layer type: {output_node.node.type}. Consider disabling Hessian info computation")
+                Logger.critical(f"All graph outputs must support Hessian score computation. Incompatible node: {output_node.node}, layer type: {output_node.node.type}. Consider disabling Hessian info computation.")

         self.input_images = fw_impl.to_tensor(input_images)
         self.num_iterations_for_approximation = num_iterations_for_approximation

         # Validate representative dataset has same inputs as graph
         if len(self.input_images)!=len(graph.get_inputs()):
-            Logger.critical(f"The graph requires {len(graph.get_inputs())} inputs, but the provided representative dataset contains {len(self.input_images)} inputs")
+            Logger.critical(f"The graph requires {len(graph.get_inputs())} inputs, but the provided representative dataset contains {len(self.input_images)} inputs.")

         # Assert all inputs have a batch size of 1
         for image in self.input_images:
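# Editor's sketch (illustrative, not part of the patch): the bipartite_graph.py hunk
# above merges two identical same-partition checks into a single condition. A minimal
# standalone version of the consolidated check, assuming plain set-based partitions
# rather than MCT's actual bipartite-graph internals:
def verify_edges(a_nodes: set, b_nodes: set, edges_list):
    for n1, n2 in edges_list:
        # An edge is invalid if both endpoints fall in the same partition.
        if (n1 in a_nodes and n2 in a_nodes) or (n1 in b_nodes and n2 in b_nodes):
            raise ValueError(f"Edge {(n1, n2)} connects nodes of the same partition.")

verify_edges({"a1", "a2"}, {"b1"}, [("a1", "b1")])    # valid: crosses partitions
# verify_edges({"a1", "a2"}, {"b1"}, [("a1", "a2")])  # would raise: same partition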
diff --git a/model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_methods.py b/model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_methods.py
index 707e73720..aedec90c7 100644
--- a/model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_methods.py
+++ b/model_compression_toolkit/core/common/mixed_precision/kpi_tools/kpi_methods.py
@@ -288,8 +288,7 @@ def _bops_kpi(mp_cfg: List[int],
             # If node doesn't have weights then its MAC count is 0, and we shouldn't consider it in the BOPS count.
             incoming_edges = graph.incoming_edges(n, sort_by_attr=EDGE_SINK_INDEX)
             if len(incoming_edges) != 1:
-                Logger.critical(f"Unable to compute BOPS metric for node {n.name} due to multiple inputs")  # pragma: no cover
-
+                Logger.critical(f"Unable to compute BOPS metric for node {n.name} due to multiple inputs.")  # pragma: no cover
             input_activation_node = incoming_edges[0].source_node
             if len(graph.out_edges(input_activation_node)) > 1:
                 # In the case where the activation node has multiple outgoing edges
diff --git a/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py b/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py
index 7d9bb8702..3fa275c6d 100644
--- a/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py
+++ b/model_compression_toolkit/core/common/mixed_precision/search_methods/linear_programming.py
@@ -51,7 +51,7 @@ def mp_integer_programming_search(search_manager: MixedPrecisionSearchManager,
     # bitwidth index to the observed sensitivity of the model when using that bitwidth for that layer.

     if target_kpi is None or search_manager is None:
-        Logger.critical("Invalid parameters: target_kpi and search_manager must not be None for mixed-precision search. Ensure valid inputs are provided.")
+        Logger.critical("Invalid parameters: 'target_kpi' and 'search_manager' must not be 'None' for mixed-precision search. Ensure valid inputs are provided.")

     layer_to_metrics_mapping = _build_layer_to_metrics_mapping(search_manager, target_kpi)
@@ -70,7 +70,7 @@ def mp_integer_programming_search(search_manager: MixedPrecisionSearchManager,
     lp_problem.solve(solver=solver)  # Try to solve the problem.

     assert lp_problem.status == LpStatusOptimal, Logger.critical(
-        "No solution found for the LP problem")
+        "No solution found for the LP problem.")
     Logger.info(LpStatus[lp_problem.status])

     # Take the bitwidth index only if its corresponding indicator is one.
@@ -176,7 +176,7 @@ def _formalize_problem(layer_to_indicator_vars_mapping: Dict[int, Dict[int, LpVa
                                        lp_problem=lp_problem,
                                        non_conf_kpi_vector=non_conf_kpi_vector)
     else:  # pragma: no cover
-        raise Logger.critical("Unable to execute mixed-precision search: 'target_kpi' is None. A valid 'target_kpi' is required.")
+        Logger.critical("Unable to execute mixed-precision search: 'target_kpi' is None. A valid 'target_kpi' is required.")

     return lp_problem
diff --git a/model_compression_toolkit/core/common/pruning/prune_graph.py b/model_compression_toolkit/core/common/pruning/prune_graph.py
index 5d732b39e..0f44bae0b 100644
--- a/model_compression_toolkit/core/common/pruning/prune_graph.py
+++ b/model_compression_toolkit/core/common/pruning/prune_graph.py
@@ -58,7 +58,7 @@ def build_pruned_graph(graph: Graph,
         # Retrieve the corresponding mask using the node's name (since we use a graph's copy).
         mask = [v for k, v in masks.items() if k.name == pruning_section.entry_node.name]
         if len(mask) != 1:
-            Logger.critical(f"Expected to find a single node with name {pruning_section.entry_node.name} in masks dictionary, but found {len(mask)}")
+            Logger.critical(f"Expected to find a single node with name {pruning_section.entry_node.name} in masks dictionary, but found {len(mask)}.")
         mask = mask[0]

         # If the mask indicates that some channels are to be pruned, apply it.
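# Editor's sketch (illustrative, not part of the patch): the linear_programming.py hunk
# above drops a redundant `raise` in front of Logger.critical. This assumes, as the rest
# of the diff suggests, that MCT's Logger.critical both logs the message and raises on
# its own, so the call never returns a value for `raise` to re-raise. A minimal stand-in
# with that behavior:
class Logger:
    @staticmethod
    def critical(msg: str):
        print(f"CRITICAL: {msg}")  # stand-in for the real logging back end
        raise Exception(msg)       # raising here makes a leading `raise` at call sites redundant

try:
    Logger.critical("No solution found for the LP problem.")
except Exception as e:
    print(f"caught: {e}")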
diff --git a/model_compression_toolkit/core/common/pruning/pruning_info.py b/model_compression_toolkit/core/common/pruning/pruning_info.py
index f9f30e067..61ac32d63 100644
--- a/model_compression_toolkit/core/common/pruning/pruning_info.py
+++ b/model_compression_toolkit/core/common/pruning/pruning_info.py
@@ -76,7 +76,7 @@ def unroll_simd_scores_to_per_channel_scores(simd_scores: Dict[BaseNode, np.ndar
     """
     if simd_scores is None or simd_groups_indices is None:
         Logger.critical(f"Failed to find scores and indices to create unrolled scores for pruning information."
-                        f" Scores: {simd_scores}, Group indices: {simd_groups_indices}")
+                        f" Scores: {simd_scores}, Group indices: {simd_groups_indices}.")
     _scores = {}
     for node, groups_indices in simd_groups_indices.items():
         node_scores = simd_scores[node]
diff --git a/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py b/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py
index 913f2bf88..6d55312e6 100644
--- a/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py
+++ b/model_compression_toolkit/core/common/quantization/quantizers/quantizers_helpers.py
@@ -238,7 +238,7 @@ def get_tensor_max(tensor_data: np.ndarray,
     """
     if n_bits < 1:
-        Logger.critical(f"Parameter n_bits must be positive; however n_bits={n_bits} was provided.")
+        Logger.critical(f"Parameter 'n_bits' must be positive; however 'n_bits'={n_bits} was provided.")
     if is_uniform_quantization:
         expansion_factor = 1.0
     elif n_bits == 1:
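# Editor's sketch (illustrative, not part of the patch): the pruning_info.py hunk above
# touches unroll_simd_scores_to_per_channel_scores, which expands one score per SIMD
# group into one score per channel. A simplified NumPy sketch of that idea, assuming
# each group's score is simply broadcast to every channel index in the group:
import numpy as np

def unroll_group_scores(group_scores: np.ndarray, groups_indices: list) -> np.ndarray:
    num_channels = sum(len(idxs) for idxs in groups_indices)
    per_channel = np.zeros(num_channels)
    for score, idxs in zip(group_scores, groups_indices):
        per_channel[idxs] = score  # every channel inherits its group's score
    return per_channel

print(unroll_group_scores(np.array([0.9, 0.1]), [[0, 1], [2, 3]]))  # [0.9 0.9 0.1 0.1]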
diff --git a/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py b/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py
index 79e953194..1ce52016b 100644
--- a/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py
+++ b/model_compression_toolkit/core/common/substitutions/batchnorm_refusing.py
@@ -103,13 +103,13 @@ def substitute(self,
         # If the linear operator is part of a reused group (it is the "base" node, or a reused node),
         # we should skip the substitution.
         if source_node.is_reused():
-            Logger.critical("BN folding substitution cannot proceed if the linear operator is part of a reused group")  # pragma: no cover
+            Logger.critical("BN folding substitution cannot proceed if the linear operator is part of a reused group.")  # pragma: no cover

         bn_node = edge_nodes[1]

         if len(graph.get_next_nodes(source_node)) > 1 or len(graph.get_prev_nodes(bn_node)) > 1:
             Logger.critical(
-                "BN folding substitution cannot proceed if the linear operator has multiple outputs or the BN layer has multiple inputs")  # pragma: no cover
+                "BN folding substitution cannot proceed if the linear operator has multiple outputs or the BN layer has multiple inputs.")  # pragma: no cover

         kernel = source_node.get_weights_by_keys(self.kernel_str)
         bias = source_node.get_weights_by_keys(self.bias_str)
diff --git a/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py b/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py
index 02a625a99..47bc827f8 100644
--- a/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py
+++ b/model_compression_toolkit/core/common/substitutions/shift_negative_activation.py
@@ -134,7 +134,7 @@ def insert_node_after_node(graph: Graph,
     last_nodes = graph.get_next_nodes(first_node)
     if len(last_nodes) != 1:
-        Logger.critical('Insertion requires exactly one successor node; multiple or no successors found.')  # pragma: no cover
+        Logger.critical(f'Insertion requires exactly one successor node; {len(last_nodes)} successors found.')  # pragma: no cover
     last_node = last_nodes[0]
     insert_node_between_two_nodes(graph, node_to_insert, first_node, last_node)
diff --git a/model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py b/model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py
index 7b18da4e4..a495ed160 100644
--- a/model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py
+++ b/model_compression_toolkit/core/keras/pruning/pruning_keras_implementation.py
@@ -172,7 +172,7 @@ def attrs_oi_channels_info_for_pruning(self,
         if fw_info.is_kernel_op(node.type):
             kernel_attributes = fw_info.get_kernel_op_attributes(node.type)
             if kernel_attributes is None or len(kernel_attributes)==0:
-                Logger.critical(f"Expected kernel attributes for operation, found None or empty.")
+                Logger.critical(f"Expected kernel attributes for operation for node type {node.type}, found None or empty.")
             for attr in kernel_attributes:
                 attributes_with_axis[attr] = fw_info.kernel_channels_mapping.get(node.type)
diff --git a/model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py b/model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py
index 0b86b5f48..93ad2f4a9 100644
--- a/model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py
+++ b/model_compression_toolkit/core/keras/quantizer/fake_quant_builder.py
@@ -71,7 +71,7 @@ def power_of_two_quantization(activation_n_bits: int,
     if activation_threshold is None:
         Logger.critical("Activation threshold must be specified.")  # pragma: no cover
     if activation_is_signed is None:
-        Logger.critical("Parameter 'activation_is_signed' must be specified")  # pragma: no cover
+        Logger.critical("Parameter 'activation_is_signed' must be specified.")  # pragma: no cover
     if not threshold_is_power_of_two(activation_threshold, per_channel=False):
         Logger.critical("Activation threshold must be a power of two.")  # pragma: no cover
diff --git a/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py b/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py
index 077ddc376..ff0631e84 100644
--- a/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py
+++ b/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/functional_batch_norm.py
@@ -47,7 +47,7 @@ def get_attributes_from_weights(node: BaseNode) -> Dict:
         Weights dictionary for BatchNorm2d.
     """
     if 1 not in node.weights and 2 not in node.weights:
-        Logger.critical(f'Missing {MOVING_MEAN} and {MOVING_VARIANCE} in functional batch_norm inputs')
+        Logger.critical(f'Missing {MOVING_MEAN} and {MOVING_VARIANCE} in functional batch_norm inputs.')
     weights_dict = {MOVING_MEAN: node.weights[1],
                     MOVING_VARIANCE: node.weights[2],
                     GAMMA: np.ones(node.weights[1].shape),
diff --git a/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py b/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py
index e90df60fd..2c1a41655 100644
--- a/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py
+++ b/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/linear_collapsing.py
@@ -101,7 +101,7 @@ def conv2d_collapsing_fn(first_node: BaseNode,
         return kernel_collapsed, bias_collapsed
     else:
-        Logger.critical(f"Layer collapsing is not supported for the combination of {first_node.type} and {second_node.type}")
+        Logger.critical(f"Layer collapsing is not supported for the combination of {first_node.type} and {second_node.type}.")


 def pytorch_linear_collapsing() -> Conv2DCollapsing:
diff --git a/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py b/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py
index b8b5b7ab0..2de5037a8 100644
--- a/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py
+++ b/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/multi_head_attention_decomposition.py
@@ -54,7 +54,7 @@ def __init__(self,
         # Add Zero Attn feature is Not Implemented
         if ADD_ZERO_ATTN in mha_node.framework_attr.keys():
             if mha_node.framework_attr[ADD_ZERO_ATTN] is not False:
-                Logger.critical('Add Zero Attention (Add Zero Attn) feature is not implemented')  # pragma: no cover
+                Logger.critical('Add Zero Attention (Add Zero Attn) feature is not implemented.')  # pragma: no cover

         # Check if Add Bias KV feature is Active
         if BIAS_K and BIAS_V in mha_node.weights.keys():
diff --git a/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py b/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py
index 9577688c2..709526e52 100644
--- a/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py
+++ b/model_compression_toolkit/core/pytorch/graph_substitutions/substitutions/reshape_with_static_shapes.py
@@ -58,7 +58,7 @@ def substitute(self,
         if len(node.output_shape) == 1:
             node.output_shape[0][0] = BATCH_DIM_VALUE
         else:
-            Logger.critical("This substitution handles 'reshape' or 'view' nodes with a single output shape")  # pragma: no cover
+            Logger.critical("This substitution handles 'reshape' or 'view' nodes with a single output shape.")  # pragma: no cover

         # configure the new static output shape attribute
         node.op_call_args = node.output_shape
diff --git a/model_compression_toolkit/core/pytorch/kpi_data_facade.py b/model_compression_toolkit/core/pytorch/kpi_data_facade.py
index 2396c7b83..5db974a40 100644
--- a/model_compression_toolkit/core/pytorch/kpi_data_facade.py
+++ b/model_compression_toolkit/core/pytorch/kpi_data_facade.py
@@ -89,5 +89,5 @@ def pytorch_kpi_data(in_model: Module,
     # If torch is not installed,
     # we raise an exception when trying to use this function.
     def pytorch_kpi_data(*args, **kwargs):
-        Logger.critical("PyTorch must be installed to use pytorch_kpi_data. The 'torch' package is missing.")  # pragma: no cover
+        Logger.critical("PyTorch must be installed to use 'pytorch_kpi_data'. The 'torch' package is missing.")  # pragma: no cover
diff --git a/model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py b/model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py
index ac7d26f52..a1e32cbb5 100644
--- a/model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py
+++ b/model_compression_toolkit/core/pytorch/pruning/pruning_pytorch_implementation.py
@@ -184,7 +184,7 @@ def attrs_oi_channels_info_for_pruning(self,
         if fw_info.is_kernel_op(node.type):
             kernel_attributes = fw_info.get_kernel_op_attributes(node.type)
             if kernel_attributes is None or len(kernel_attributes) == 0:
-                Logger.critical(f"Expected to find kernel attributes but none were identified.")
+                Logger.critical(f"Expected to find kernel attributes but none were identified for node '{node.name}' of type {node.type}.")
             for attr in kernel_attributes:
                 attributes_with_axis[attr] = fw_info.kernel_channels_mapping.get(node.type)
diff --git a/model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py b/model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py
index e6e200fb2..229e0a1d5 100644
--- a/model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py
+++ b/model_compression_toolkit/data_generation/pytorch/pytorch_data_generation.py
@@ -359,5 +359,5 @@ def get_pytorch_data_generation_config(*args, **kwargs):

     def pytorch_data_generation_experimental(*args, **kwargs):
-        Logger.critical('PyTorch must be installed to use pytorch_data_generation_experimental. '
+        Logger.critical("PyTorch must be installed to use 'pytorch_data_generation_experimental'. "
                         "The 'torch' package is missing.")  # pragma: no cover
diff --git a/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py b/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py
index a241b88b4..36676e610 100644
--- a/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py
+++ b/model_compression_toolkit/exporter/model_exporter/pytorch/pytorch_export_facade.py
@@ -103,5 +103,5 @@ def pytorch_export_model(model: torch.nn.Module,
 else:
     def pytorch_export_model(*args, **kwargs):
-        Logger.critical('PyTorch must be installed to use pytorch_export_model. '
+        Logger.critical("PyTorch must be installed to use 'pytorch_export_model'. "
                         "The 'torch' package is missing.")  # pragma: no cover
diff --git a/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py b/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py
index 55e142304..e7a9871b9 100644
--- a/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py
+++ b/model_compression_toolkit/exporter/model_wrapper/pytorch/builder/fully_quantized_model_builder.py
@@ -84,5 +84,5 @@ def get_exportable_pytorch_model(graph: Graph):
 else:
     def get_exportable_pytorch_model(*args, **kwargs):
-        Logger.critical('PyTorch must be installed to use get_exportable_pytorch_model. '
+        Logger.critical("PyTorch must be installed to use 'get_exportable_pytorch_model'. "
                         "The 'torch' package is missing.")  # pragma: no cover
\ No newline at end of file
diff --git a/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py b/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py
index 22ee33e93..0da2a9f8f 100644
--- a/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py
+++ b/model_compression_toolkit/exporter/model_wrapper/pytorch/validate_layer.py
@@ -35,14 +35,14 @@ def is_pytorch_layer_exportable(layer: Any) -> bool:
             Check whether a PyTorch layer is a valid exportable layer or not.
         """
         if not isinstance(layer, nn.Module):
-            Logger.critical(f'Exportable layer must be a nn.Module layer, but layer {layer.name} is of type {type(layer)}')  # pragma: no cover
+            Logger.critical(f'Exportable layer must be an nn.Module layer, but layer {layer.name} is of type {type(layer)}.')  # pragma: no cover

         if isinstance(layer, PytorchQuantizationWrapper):
             valid_weights_quantizers = isinstance(layer.weights_quantizers, dict)
             if not valid_weights_quantizers:
                 Logger.critical(
                     f'PytorchQuantizationWrapper must have a weights_quantizers but has a '
-                    f'{type(layer.weights_quantizers)} object')  # pragma: no cover
+                    f'{type(layer.weights_quantizers)} object.')  # pragma: no cover

             if len(layer.weights_quantizers) == 0:
                 Logger.critical(f'PytorchQuantizationWrapper must have at least one weight quantizer, but found {len(layer.weights_quantizers)} quantizers.'
@@ -52,18 +52,18 @@ def is_pytorch_layer_exportable(layer: Any) -> bool:
                 if not isinstance(weights_quantizer, BasePyTorchInferableQuantizer):
                     Logger.critical(
                         f'weights_quantizer must be a BasePyTorchInferableQuantizer object but has a '
-                        f'{type(weights_quantizer)} object')  # pragma: no cover
+                        f'{type(weights_quantizer)} object.')  # pragma: no cover

         elif isinstance(layer, PytorchActivationQuantizationHolder):
             if not isinstance(layer.activation_holder_quantizer, BasePyTorchInferableQuantizer):
                 Logger.critical(
                     f'activation quantizer in PytorchActivationQuantizationHolder'
                     f' must be a BasePyTorchInferableQuantizer object but has a '
-                    f'{type(layer.activation_holder_quantizer)} object')  # pragma: no cover
+                    f'{type(layer.activation_holder_quantizer)} object.')  # pragma: no cover

         return True
 else:
     def is_pytorch_layer_exportable(*args, **kwargs):  # pragma: no cover
-        Logger.critical('PyTorch must be installed to use is_pytorch_layer_exportable. '
+        Logger.critical("PyTorch must be installed to use 'is_pytorch_layer_exportable'. "
                         "The 'torch' package is missing.")  # pragma: no cover
\ No newline at end of file
diff --git a/model_compression_toolkit/gptq/pytorch/quantization_facade.py b/model_compression_toolkit/gptq/pytorch/quantization_facade.py
index 06d47fc0c..8faf167b0 100644
--- a/model_compression_toolkit/gptq/pytorch/quantization_facade.py
+++ b/model_compression_toolkit/gptq/pytorch/quantization_facade.py
@@ -202,10 +202,10 @@ def pytorch_gradient_post_training_quantization(model: Module,
     # If torch is not installed,
     # we raise an exception when trying to use these functions.
     def get_pytorch_gptq_config(*args, **kwargs):
-        Logger.critical("PyTorch must be installed to use get_pytorch_gptq_config. "
+        Logger.critical("PyTorch must be installed to use 'get_pytorch_gptq_config'. "
                         "The 'torch' package is missing.")  # pragma: no cover

     def pytorch_gradient_post_training_quantization(*args, **kwargs):
-        Logger.critical("PyTorch must be installed to use pytorch_gradient_post_training_quantization. "
+        Logger.critical("PyTorch must be installed to use 'pytorch_gradient_post_training_quantization'. "
                         "The 'torch' package is missing.")  # pragma: no cover
diff --git a/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py b/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py
index a1cf4bbbe..810a6bfb7 100644
--- a/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py
+++ b/model_compression_toolkit/gptq/pytorch/quantizer/base_pytorch_gptq_quantizer.py
@@ -87,5 +87,5 @@ def get_quant_config(self):
 else:
     class BasePytorchGPTQTrainableQuantizer:  # pragma: no cover
         def __init__(self, *args, **kwargs):
-            Logger.critical("PyTorch must be installed to use BasePytorchGPTQTrainableQuantizer. "
+            Logger.critical("PyTorch must be installed to use 'BasePytorchGPTQTrainableQuantizer'. "
                             "The 'torch' package is missing.")  # pragma: no cover
diff --git a/model_compression_toolkit/pruning/pytorch/pruning_facade.py b/model_compression_toolkit/pruning/pytorch/pruning_facade.py
index 2d83d7d45..64f29b81f 100644
--- a/model_compression_toolkit/pruning/pytorch/pruning_facade.py
+++ b/model_compression_toolkit/pruning/pytorch/pruning_facade.py
@@ -165,5 +165,5 @@ def pytorch_pruning_experimental(*args, **kwargs):
         Raises:
             CriticalError: Indicates that PyTorch must be installed to use this function.
         """
-        Logger.critical('PyTorch must be installed to use pytorch_pruning_experimental. '
+        Logger.critical("PyTorch must be installed to use 'pytorch_pruning_experimental'. "
                         "The 'torch' package is missing.")  # pragma: no cover
diff --git a/model_compression_toolkit/ptq/pytorch/quantization_facade.py b/model_compression_toolkit/ptq/pytorch/quantization_facade.py
index 89a764b7b..55943f6cd 100644
--- a/model_compression_toolkit/ptq/pytorch/quantization_facade.py
+++ b/model_compression_toolkit/ptq/pytorch/quantization_facade.py
@@ -128,5 +128,5 @@ def pytorch_post_training_quantization(in_module: Module,
     # If torch is not installed,
     # we raise an exception when trying to use these functions.
     def pytorch_post_training_quantization(*args, **kwargs):
-        Logger.critical('PyTorch must be installed to use pytorch_post_training_quantization_experimental. '
+        Logger.critical("PyTorch must be installed to use 'pytorch_post_training_quantization'. "
                         "The 'torch' package is missing.")  # pragma: no cover
diff --git a/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py b/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py
index 1a8be58af..e0ee6471b 100644
--- a/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py
+++ b/model_compression_toolkit/qat/keras/quantizer/quantization_builder.py
@@ -49,7 +49,7 @@ def get_activation_quantizer_holder(n: common.BaseNode,
     # quantization, which in this case has an empty list).
     if len(activation_quantizers) == 1:
         return KerasActivationQuantizationHolder(activation_quantizers[0])
-    Logger.critical(f'KerasActivationQuantizationHolder supports a single quantizer but {len(activation_quantizers)} quantizers were found for node {n}')
+    Logger.critical(f'KerasActivationQuantizationHolder supports a single quantizer but {len(activation_quantizers)} quantizers were found for node {n}.')


 def quantization_builder(n: common.BaseNode,
diff --git a/model_compression_toolkit/qat/pytorch/quantization_facade.py b/model_compression_toolkit/qat/pytorch/quantization_facade.py
index 4d0840807..c3a478380 100644
--- a/model_compression_toolkit/qat/pytorch/quantization_facade.py
+++ b/model_compression_toolkit/qat/pytorch/quantization_facade.py
@@ -244,5 +244,5 @@ def pytorch_quantization_aware_training_init_experimental(*args, **kwargs):

     def pytorch_quantization_aware_training_finalize_experimental(*args, **kwargs):
-        Logger.critical('PyTorch must be installed to use pytorch_quantization_aware_training_finalize_experimental. '
+        Logger.critical("PyTorch must be installed to use 'pytorch_quantization_aware_training_finalize_experimental'. "
                         "The 'torch' package is missing.")  # pragma: no cover
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py b/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py
index 6a14f14dd..c0bba5a2e 100644
--- a/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py
+++ b/model_compression_toolkit/target_platform_capabilities/target_platform/target_platform_model.py
@@ -192,7 +192,7 @@ def __validate_model(self):
         """
         opsets_names = [op.name for op in self.operator_set]
         if (len(set(opsets_names)) != len(opsets_names)):
-            Logger.critical(f'OperatorsSet must have unique names.')
+            Logger.critical(f'Operator Sets must have unique names.')

     def get_default_config(self) -> OpQuantizationConfig:
         """
diff --git a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py
index 6c31aaea1..ce6f034dc 100644
--- a/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py
+++ b/model_compression_toolkit/target_platform_capabilities/target_platform/targetplatform2framework/current_tpc.py
@@ -39,7 +39,7 @@ def get(self):
         """
         if self.tpc is None:
-            Logger.critical("'TargetPlatformCapabilities' instance is not initialized.")
+            Logger.critical("'TargetPlatformCapabilities' (TPC) instance is not initialized.")
         return self.tpc

     def reset(self):
diff --git a/model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py b/model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py
index 2e874c011..e5dab2ae9 100644
--- a/model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py
+++ b/model_compression_toolkit/trainable_infrastructure/pytorch/base_pytorch_quantizer.py
@@ -60,6 +60,6 @@ class BasePytorchTrainableQuantizer(BaseTrainableQuantizer):
         def __init__(self,
                      quantization_config: Union[TrainableQuantizerWeightsConfig, TrainableQuantizerActivationConfig]):
             super().__init__(quantization_config)
-            Logger.critical("PyTorch must be installed to use BasePytorchTrainableQuantizer. "
+            Logger.critical("PyTorch must be installed to use 'BasePytorchTrainableQuantizer'. "
                             "The 'torch' package is missing.")  # pragma: no cover
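# Editor's sketch (illustrative, not part of the patch): most of the hunks above edit
# the same guard pattern, in which a facade module defines the real function when torch
# imports successfully and a stub that calls Logger.critical otherwise. A minimal sketch
# of that import-guard idiom, using a hypothetical `some_pytorch_feature` in place of
# MCT's real facades and a plain exception in place of Logger.critical:
FOUND_TORCH = True
try:
    import torch
except ImportError:
    FOUND_TORCH = False

if FOUND_TORCH:
    def some_pytorch_feature(model: "torch.nn.Module"):
        return model  # the real implementation would go here
else:
    def some_pytorch_feature(*args, **kwargs):
        raise Exception("PyTorch must be installed to use 'some_pytorch_feature'. "
                        "The 'torch' package is missing.")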