From 2cb6fe1b433195280274a9e4aaf86cf3fbc75200 Mon Sep 17 00:00:00 2001 From: Jan-Frederik Schulte Date: Tue, 27 Aug 2024 08:33:54 -0400 Subject: [PATCH] Add functionality to use granularity option also for pytorch models (#1051) * allow granularity options in pytorch parser * pre-commit * [pre-commit.ci] auto fixes from pre-commit hooks * add torch to setup? * add torch to setup2? * add torch to setup3? * add torch to requirements * fix failing pytest * adapt new batchnorm pytests to changes in interface * addressing comments from Vladimir and Jovan * removing torch from requirements --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- hls4ml/converters/__init__.py | 9 +-- hls4ml/converters/pytorch_to_hls.py | 44 ++++++++--- hls4ml/utils/config.py | 79 +++++++++++++++++++ test/pytest/test_backend_config.py | 33 +++++--- test/pytest/test_batchnorm_pytorch.py | 15 ++-- test/pytest/test_merge_pytorch.py | 8 +- test/pytest/test_pytorch_api.py | 79 ++++++++----------- test/pytest/test_recurrent_pytorch.py | 19 +++-- .../pytest/test_sequential_parsing_pytorch.py | 12 +-- test/pytest/test_upsampling_pytorch.py | 6 +- 10 files changed, 204 insertions(+), 100 deletions(-) diff --git a/hls4ml/converters/__init__.py b/hls4ml/converters/__init__.py index 3bd6d06c3b..092e53b3d3 100644 --- a/hls4ml/converters/__init__.py +++ b/hls4ml/converters/__init__.py @@ -10,6 +10,8 @@ from hls4ml.converters.keras_to_hls import get_supported_keras_layers # noqa: F401 from hls4ml.converters.keras_to_hls import parse_keras_model # noqa: F401 from hls4ml.converters.keras_to_hls import keras_to_hls, register_keras_layer_handler + +# from hls4ml.converters.pytorch_to_hls import parse_pytorch_model # noqa: F401 from hls4ml.model import ModelGraph from hls4ml.utils.config import create_config from hls4ml.utils.symbolic_utils import LUTFunction @@ -238,7 +240,6 @@ def convert_from_keras_model( def convert_from_pytorch_model( model, - input_shape, output_dir='my-hls-test', project_name='myproject', input_data_tb=None, @@ -251,7 +252,6 @@ Args: model: PyTorch model to convert. - input_shape (list): The shape of the input tensor. First element is the batch size, needs to be None output_dir (str, optional): Output directory of the generated HLS project. Defaults to 'my-hls-test'. project_name (str, optional): Name of the HLS project. Defaults to 'myproject'. 
input_data_tb (str, optional): String representing the path of input data in .npy or .dat format that will be @@ -293,7 +293,6 @@ def convert_from_pytorch_model( config = create_config(output_dir=output_dir, project_name=project_name, backend=backend, **kwargs) config['PytorchModel'] = model - config['InputShape'] = input_shape config['InputData'] = input_data_tb config['OutputPredictions'] = output_data_tb config['HLSConfig'] = {} @@ -301,9 +300,9 @@ def convert_from_pytorch_model( if hls_config is None: hls_config = {} - model_config = hls_config.get('Model', None) + model_config = hls_config.get('Model') config['HLSConfig']['Model'] = _check_model_config(model_config) - + config['InputShape'] = hls_config.get('InputShape') _check_hls_config(config, hls_config) return pytorch_to_hls(config) diff --git a/hls4ml/converters/pytorch_to_hls.py b/hls4ml/converters/pytorch_to_hls.py index bd483b3690..40336835a6 100644 --- a/hls4ml/converters/pytorch_to_hls.py +++ b/hls4ml/converters/pytorch_to_hls.py @@ -102,7 +102,7 @@ def decorator(function): # ---------------------------------------------------------------- -def pytorch_to_hls(config): +def parse_pytorch_model(config, verbose=True): """Convert PyTorch model to hls4ml ModelGraph. Args: @@ -118,14 +118,15 @@ def pytorch_to_hls(config): # This is a list of dictionaries to hold all the layer info we need to generate HLS layer_list = [] - print('Interpreting Model ...') - + if verbose: + print('Interpreting Model ...') reader = PyTorchFileReader(config) if isinstance(config['PytorchModel'], str) else PyTorchModelReader(config) if type(reader.input_shape) is tuple: input_shapes = [list(reader.input_shape)] else: input_shapes = list(reader.input_shape) - input_shapes = [list(shape) for shape in input_shapes] + # first element needs to be 'None' as a placeholder for the batch size, insert it if not present + input_shapes = [[None] + list(shape) if shape[0] is not None else list(shape) for shape in input_shapes] model = reader.torch_model @@ -151,7 +152,8 @@ def pytorch_to_hls(config): output_shape = None # Loop through layers - print('Topology:') + if verbose: + print('Topology:') layer_counter = 0 n_inputs = 0 @@ -226,13 +228,14 @@ def pytorch_to_hls(config): pytorch_class, layer_name, input_names, input_shapes, node, class_object, reader, config ) - print( - 'Layer name: {}, layer type: {}, input shape: {}'.format( - layer['name'], - layer['class_name'], - input_shapes, + if verbose: + print( + 'Layer name: {}, layer type: {}, input shape: {}'.format( + layer['name'], + layer['class_name'], + input_shapes, + ) ) - ) layer_list.append(layer) assert output_shape is not None @@ -288,7 +291,12 @@ def pytorch_to_hls(config): operation, layer_name, input_names, input_shapes, node, None, reader, config ) - print('Layer name: {}, layer type: {}, input shape: {}'.format(layer['name'], layer['class_name'], input_shapes)) + if verbose: + print( + 'Layer name: {}, layer type: {}, input shape: {}'.format( + layer['name'], layer['class_name'], input_shapes + ) + ) layer_list.append(layer) assert output_shape is not None @@ -342,7 +350,12 @@ def pytorch_to_hls(config): operation, layer_name, input_names, input_shapes, node, None, reader, config ) - print('Layer name: {}, layer type: {}, input shape: {}'.format(layer['name'], layer['class_name'], input_shapes)) + if verbose: + print( + 'Layer name: {}, layer type: {}, input shape: {}'.format( + layer['name'], layer['class_name'], input_shapes + ) + ) layer_list.append(layer) assert output_shape is not None @@ -351,6 
+364,11 @@ def pytorch_to_hls(config): if len(input_layers) == 0: input_layers = None + return layer_list, input_layers + + +def pytorch_to_hls(config): + layer_list, input_layers = parse_pytorch_model(config) print('Creating HLS model') hls_model = ModelGraph(config, layer_list, inputs=input_layers) return hls_model diff --git a/hls4ml/utils/config.py b/hls4ml/utils/config.py index 1a297787d6..6cba033de2 100644 --- a/hls4ml/utils/config.py +++ b/hls4ml/utils/config.py @@ -269,6 +269,7 @@ def make_layer_config(layer): def config_from_pytorch_model( model, + input_shape, granularity='model', backend=None, default_precision='ap_fixed<16,6>', @@ -284,6 +285,7 @@ Args: model: PyTorch model + input_shape (tuple or list of tuples): The shape of the input tensor, excluding the batch size. granularity (str, optional): Granularity of the created config. Defaults to 'model'. Can be set to 'model', 'type' or 'name'. @@ -321,6 +323,83 @@ def config_from_pytorch_model( model_config['Strategy'] = 'Latency' config['Model'] = model_config + config['PytorchModel'] = model + if not (isinstance(input_shape, tuple) or (isinstance(input_shape, list) and isinstance(input_shape[0], tuple))): + raise Exception('Input shape must be tuple (single input) or list of tuples (multiple inputs)') + config['InputShape'] = input_shape + + if granularity.lower() not in ['model', 'type', 'name']: + raise Exception( + f'Invalid configuration granularity specified, expected "model", "type" or "name", got "{granularity}"' + ) + + if backend is not None: + backend = hls4ml.backends.get_backend(backend) + + from hls4ml.converters.pytorch_to_hls import parse_pytorch_model + + ( + layer_list, + _, + ) = parse_pytorch_model(config, verbose=False) + + def make_layer_config(layer): + cls_name = layer['class_name'] + if 'config' in layer.keys(): + if 'activation' in layer['config'].keys(): + if layer['config']['activation'] == 'softmax': + cls_name = 'Softmax' + + layer_cls = hls4ml.model.layers.layer_map[cls_name] + if backend is not None: + layer_cls = backend.create_layer_class(layer_cls) + + layer_config = {} + + config_attrs = [a for a in layer_cls.expected_attributes if a.configurable] + for attr in config_attrs: + if isinstance(attr, hls4ml.model.attributes.TypeAttribute): + precision_cfg = layer_config.setdefault('Precision', {}) + name = attr.name + if name.endswith('_t'): + name = name[:-2] + if attr.default is None: + precision_cfg[name] = default_precision + else: + precision_cfg[name] = str(attr.default) + elif attr.name == 'reuse_factor': + layer_config[attr.config_name] = default_reuse_factor + else: + if attr.default is not None: + layer_config[attr.config_name] = attr.default + + if layer['class_name'] == 'Input': + dtype = layer['config']['dtype'] + if dtype.startswith('int') or dtype.startswith('uint'): + typename = dtype[: dtype.index('int') + 3] + width = int(dtype[dtype.index('int') + 3 :]) + layer_config['Precision']['result'] = f'ap_{typename}<{width}>' + # elif bool, q[u]int, ... 
+ + return layer_config + + if granularity.lower() == 'type': + type_config = {} + for layer in layer_list: + if layer['class_name'] in type_config: + continue + layer_config = make_layer_config(layer) + type_config[layer['class_name']] = layer_config + + config['LayerType'] = type_config + + elif granularity.lower() == 'name': + name_config = {} + for layer in layer_list: + layer_config = make_layer_config(layer) + name_config[layer['name']] = layer_config + + config['LayerName'] = name_config return config diff --git a/test/pytest/test_backend_config.py b/test/pytest/test_backend_config.py index 346402de13..c43a7c7680 100644 --- a/test/pytest/test_backend_config.py +++ b/test/pytest/test_backend_config.py @@ -31,7 +31,7 @@ def test_backend_config(framework, backend, part, clock_period, clock_unc): convert_fn = hls4ml.converters.convert_from_keras_model else: model = torch.nn.Sequential(torch.nn.Linear(1, 2), torch.nn.ReLU()) - config = hls4ml.utils.config_from_pytorch_model(model) + config = hls4ml.utils.config_from_pytorch_model(model, input_shape=(None, 1)) convert_fn = hls4ml.converters.convert_from_pytorch_model if clock_unc is not None: @@ -42,16 +42,27 @@ def test_backend_config(framework, backend, part, clock_period, clock_unc): test_dir = f'hls4mlprj_backend_config_{framework}_{backend}_part_{part}_period_{clock_period}_unc_{unc_str}' output_dir = test_root_path / test_dir - hls_model = convert_fn( - model, - input_shape=(None, 1), # This serves as a test of handling unexpected values by the backend in keras converer - hls_config=config, - output_dir=str(output_dir), - backend=backend, - part=part, - clock_period=clock_period, - clock_uncertainty=clock_unc, - ) + if framework == "keras": + hls_model = convert_fn( + model, + input_shape=(None, 1), # This serves as a test of handling unexpected values by the backend in keras converter + hls_config=config, + output_dir=str(output_dir), + backend=backend, + part=part, + clock_period=clock_period, + clock_uncertainty=clock_unc, + ) + else: + hls_model = convert_fn( + model, + hls_config=config, + output_dir=str(output_dir), + backend=backend, + part=part, + clock_period=clock_period, + clock_uncertainty=clock_unc, + ) hls_model.write() diff --git a/test/pytest/test_batchnorm_pytorch.py b/test/pytest/test_batchnorm_pytorch.py index 1e45e7ae0f..fd4efdf326 100644 --- a/test/pytest/test_batchnorm_pytorch.py +++ b/test/pytest/test_batchnorm_pytorch.py @@ -39,10 +39,12 @@ def test_batchnorm(data, backend, io_type): default_precision = 'ac_fixed<32, 1, true>' if backend == 'Quartus' else 'ac_fixed<32, 1>' - config = hls4ml.utils.config_from_pytorch_model(model, default_precision=default_precision, granularity='name') + config = hls4ml.utils.config_from_pytorch_model( + model, (in_shape,), default_precision=default_precision, granularity='name' + ) output_dir = str(test_root_path / f'hls4mlprj_batchnorm_{backend}_{io_type}') hls_model = hls4ml.converters.convert_from_pytorch_model( - model, (None, in_shape), backend=backend, hls_config=config, io_type=io_type, output_dir=output_dir + model, backend=backend, hls_config=config, io_type=io_type, output_dir=output_dir ) hls_model.compile() @@ -94,9 +96,13 @@ def test_batchnorm_fusion(fusion_data, backend, io_type): # We do not have an implementation of a transpose for io_stream, need to transpose inputs and outputs outside of hls4ml if io_type == 'io_stream': fusion_data = np.ascontiguousarray(fusion_data.transpose(0, 2, 1)) - config = hls4ml.utils.config_from_pytorch_model(model, 
channels_last_conversion='internal', transpose_outputs=False) + config = hls4ml.utils.config_from_pytorch_model( + model, (n_in, size_in_height), channels_last_conversion='internal', transpose_outputs=False + ) else: - config = hls4ml.utils.config_from_pytorch_model(model, channels_last_conversion='full', transpose_outputs=True) + config = hls4ml.utils.config_from_pytorch_model( + model, (n_in, size_in_height), channels_last_conversion='full', transpose_outputs=True + ) config['Model']['Strategy'] = 'Resource' @@ -104,7 +110,6 @@ def test_batchnorm_fusion(fusion_data, backend, io_type): output_dir = str(test_root_path / f'hls4mlprj_block_{backend}_{io_type}') hls_model = hls4ml.converters.convert_from_pytorch_model( model, - (None, n_in, size_in_height), hls_config=config, output_dir=output_dir, backend=backend, diff --git a/test/pytest/test_merge_pytorch.py b/test/pytest/test_merge_pytorch.py index ac42a7bb42..1dc461e532 100644 --- a/test/pytest/test_merge_pytorch.py +++ b/test/pytest/test_merge_pytorch.py @@ -41,14 +41,16 @@ def test_merge(merge_op, io_type, backend): model = MergeModule(merge_op) model.eval() - batch_input_shape = (None,) + input_shape config = hls4ml.utils.config_from_pytorch_model( - model, default_precision='ap_fixed<32,16>', channels_last_conversion="internal", transpose_outputs=False + model, + [input_shape, input_shape], + default_precision='ap_fixed<32,16>', + channels_last_conversion="internal", + transpose_outputs=False, ) output_dir = str(test_root_path / f'hls4mlprj_merge_pytorch_{merge_op}_{backend}_{io_type}') hls_model = hls4ml.converters.convert_from_pytorch_model( model, - [batch_input_shape, batch_input_shape], hls_config=config, output_dir=output_dir, io_type=io_type, diff --git a/test/pytest/test_pytorch_api.py b/test/pytest/test_pytorch_api.py index 8d18c6a1d2..295867c4ff 100644 --- a/test/pytest/test_pytorch_api.py +++ b/test/pytest/test_pytorch_api.py @@ -32,12 +32,10 @@ def test_linear(backend, io_type): pytorch_prediction = model(torch.Tensor(X_input)).detach().numpy() - config = config_from_pytorch_model(model) + config = config_from_pytorch_model(model, (1,)) output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_linear_{backend}_{io_type}') - hls_model = convert_from_pytorch_model( - model, (None, 1), hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type - ) + hls_model = convert_from_pytorch_model(model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type) hls_model.compile() @@ -83,13 +81,11 @@ def test_activations(activation_function, backend, io_type): pytorch_prediction = model(torch.Tensor(X_input)).detach().numpy() - config = config_from_pytorch_model(model) + config = config_from_pytorch_model(model, (1,)) output_dir = str( test_root_path / f'hls4mlprj_pytorch_api_activations_{activation_function.__class__.__name__}_{backend}_{io_type}' ) - hls_model = convert_from_pytorch_model( - model, (None, 1), hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type - ) + hls_model = convert_from_pytorch_model(model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type) hls_model.compile() hls_prediction = hls_model.predict(X_input) @@ -174,12 +170,10 @@ def test_activation_functionals(activation_function, backend, io_type): pytorch_prediction = model(torch.Tensor(X_input)).detach().numpy() - config = config_from_pytorch_model(model) + config = config_from_pytorch_model(model, (1,)) fn_name = activation_function.__class__.__name__ output_dir = 
str(test_root_path / f'hls4mlprj_pytorch_api_activations_functional_relu_{backend}_{io_type}_{fn_name}') - hls_model = convert_from_pytorch_model( - model, (None, 1), hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type - ) + hls_model = convert_from_pytorch_model(model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type) hls_model.compile() hls_prediction = hls_model.predict(X_input) @@ -217,14 +211,14 @@ def test_conv1d(padds, backend, io_type): if io_type == 'io_stream': X_input = np.ascontiguousarray(X_input.transpose(0, 2, 1)) - config = config_from_pytorch_model(model, channels_last_conversion="internal", transpose_outputs=False) + config = config_from_pytorch_model( + model, (n_in, size_in), channels_last_conversion="internal", transpose_outputs=False + ) else: - config = config_from_pytorch_model(model, channels_last_conversion="full", transpose_outputs=True) + config = config_from_pytorch_model(model, (n_in, size_in), channels_last_conversion="full", transpose_outputs=True) output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_conv1d_{padds}_{backend}_{io_type}') - hls_model = convert_from_pytorch_model( - model, (None, n_in, size_in), hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type - ) + hls_model = convert_from_pytorch_model(model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type) hls_model.compile() from torch.fx import symbolic_trace @@ -328,14 +322,17 @@ def test_conv2d(padds, backend, io_type): if io_type == 'io_stream': X_input = np.ascontiguousarray(X_input.transpose(0, 2, 3, 1)) - config = config_from_pytorch_model(model, channels_last_conversion="internal", transpose_outputs=False) + config = config_from_pytorch_model( + model, (n_in, size_in_height, size_in_width), channels_last_conversion="internal", transpose_outputs=False + ) else: - config = config_from_pytorch_model(model, channels_last_conversion="full", transpose_outputs=True) + config = config_from_pytorch_model( + model, (n_in, size_in_height, size_in_width), channels_last_conversion="full", transpose_outputs=True + ) output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_conv2d_{padds}_{backend}_{io_type}') hls_model = convert_from_pytorch_model( model, - (None, n_in, size_in_height, size_in_width), hls_config=config, output_dir=output_dir, backend=backend, @@ -478,20 +475,16 @@ def test_pooling(pooling, padds, backend): size_in_height = 0 input_shape = (1, n_in, size_in_height, size_in_width) if '2d' in pooling.__name__ else (1, n_in, size_in_width) - input_shape_forHLS = ( - (None, n_in, size_in_height, size_in_width) if '2d' in pooling.__name__ else (None, n_in, size_in_width) - ) + input_shape_forHLS = (n_in, size_in_height, size_in_width) if '2d' in pooling.__name__ else (n_in, size_in_width) X_input = np.random.rand(*input_shape) model = torch.nn.Sequential(pooling(2, padding=padds)).to() model.eval() pytorch_prediction = model(torch.Tensor(X_input)).detach().numpy() - config = config_from_pytorch_model(model) + config = config_from_pytorch_model(model, input_shape_forHLS) output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_pooling_{pooling.__name__}_padds_{padds}_backend_{backend}') - hls_model = convert_from_pytorch_model( - model, input_shape_forHLS, hls_config=config, output_dir=output_dir, backend=backend - ) + hls_model = convert_from_pytorch_model(model, hls_config=config, output_dir=output_dir, backend=backend) hls_model.compile() from torch.fx import symbolic_trace @@ -598,12 +591,10 
@@ def test_bn(backend, io_type): pytorch_prediction = model(torch.Tensor(X_input)).detach().numpy().flatten() - config = config_from_pytorch_model(model) + config = config_from_pytorch_model(model, (5,)) output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_bn_{backend}_{io_type}') - hls_model = convert_from_pytorch_model( - model, (None, 5), hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type - ) + hls_model = convert_from_pytorch_model(model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type) hls_model.compile() @@ -641,13 +632,11 @@ def test_squeeze(backend, io_type): pytorch_prediction = model(torch.Tensor(X_input)).detach().numpy().flatten() - config = config_from_pytorch_model(model) + config = config_from_pytorch_model(model, (5,)) del config['Model']['ChannelsLastConversion'] # We don't want anything touched for this test output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_squeeze_{backend}_{io_type}') - hls_model = convert_from_pytorch_model( - model, (None, 5), hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type - ) + hls_model = convert_from_pytorch_model(model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type) hls_model.compile() @@ -672,11 +661,11 @@ def test_flatten(backend): input = torch.randn(1, 1, 5, 5) model = nn.Sequential(nn.Conv2d(1, 32, 5, 1, 1), nn.Flatten(), nn.ReLU()) pytorch_prediction = model(input).detach().numpy() - input_shape = (None, 1, 5, 5) + input_shape = (1, 5, 5) - config = config_from_pytorch_model(model) + config = config_from_pytorch_model(model, input_shape) output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_flatten_backend_{backend}') - hls_model = convert_from_pytorch_model(model, input_shape, hls_config=config, output_dir=output_dir, backend=backend) + hls_model = convert_from_pytorch_model(model, hls_config=config, output_dir=output_dir, backend=backend) hls_model.compile() pred = hls_model.predict(input.detach().numpy()) @@ -718,14 +707,16 @@ def test_skipped_layers(backend, io_type): model.eval() input_shape = (3, 8) - batch_input_shape = (None,) + input_shape config = config_from_pytorch_model( - model, default_precision='ap_fixed<32,16>', channels_last_conversion="full", transpose_outputs=False + model, + input_shape, + default_precision='ap_fixed<32,16>', + channels_last_conversion="full", + transpose_outputs=False, ) output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_skipped_{backend}_{io_type}') hls_model = convert_from_pytorch_model( model, - batch_input_shape, hls_config=config, output_dir=output_dir, io_type=io_type, @@ -781,16 +772,15 @@ def forward(self, x): input_tensor = torch.randn(10, 1, 8, 8) hls_input = np.ascontiguousarray(torch.permute(input_tensor, (0, 2, 3, 1)).detach().numpy()) - batch_input_shape = (None,) + input_shape config = config_from_pytorch_model( model, + input_shape, default_precision='ap_fixed<32,16>', channels_last_conversion="full", # Crucial for testing if the first Transpose was removed ) output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_transpose_nop_{tensor_rank}d_{backend}_{io_type}') hls_model = convert_from_pytorch_model( model, - batch_input_shape, hls_config=config, output_dir=output_dir, io_type=io_type, @@ -846,12 +836,11 @@ def forward(self, x): # X_input is channels last X_input = np.ascontiguousarray(X_input.transpose(0, 2, 1)) - config = config_from_pytorch_model(model, channels_last_conversion="internal", transpose_outputs=False) + config = 
config_from_pytorch_model(model, (n_in, size_in), channels_last_conversion="internal", transpose_outputs=False) output_dir = str(test_root_path / f'hls4mlprj_pytorch_view_{backend}_{io_type}') hls_model = convert_from_pytorch_model( model, - (None, n_in, size_in), hls_config=config, output_dir=output_dir, backend=backend, diff --git a/test/pytest/test_recurrent_pytorch.py b/test/pytest/test_recurrent_pytorch.py index c1672c73b9..e4737ea675 100644 --- a/test/pytest/test_recurrent_pytorch.py +++ b/test/pytest/test_recurrent_pytorch.py @@ -32,12 +32,12 @@ def test_gru(backend, io_type): pytorch_prediction = model(torch.Tensor(X_input), torch.Tensor(h0)).detach().numpy() - config = config_from_pytorch_model(model, channels_last_conversion="off", transpose_outputs=False) + config = config_from_pytorch_model( + model, [(None, 1, 10), (None, 1, 20)], channels_last_conversion="off", transpose_outputs=False + ) output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_gru_{backend}_{io_type}') - hls_model = convert_from_pytorch_model( - model, [(None, 1, 10), (None, 1, 20)], hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type - ) + hls_model = convert_from_pytorch_model(model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type) hls_model.compile() @@ -69,12 +69,13 @@ def test_lstm(backend, io_type): pytorch_prediction = model(torch.Tensor(X_input), torch.Tensor(h0), torch.tensor(c0)).detach().numpy() - config = config_from_pytorch_model(model, channels_last_conversion="off", transpose_outputs=False) + config = config_from_pytorch_model( + model, [(None, 1, 10), (None, 1, 20), (None, 1, 20)], channels_last_conversion="off", transpose_outputs=False + ) output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_lstm_{backend}_{io_type}') hls_model = convert_from_pytorch_model( model, - [(None, 1, 10), (None, 1, 20), (None, 1, 20)], hls_config=config, output_dir=output_dir, backend=backend, @@ -112,11 +113,13 @@ def test_rnn(backend, io_type): pytorch_prediction = model(torch.Tensor(X_input), torch.Tensor(h0)).detach().numpy() - config = config_from_pytorch_model(model, channels_last_conversion="off", transpose_outputs=False) + config = config_from_pytorch_model( + model, [(1, 10), (1, 20)], channels_last_conversion="off", transpose_outputs=False + ) output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_rnn_{backend}_{io_type}') hls_model = convert_from_pytorch_model( - model, [(None, 1, 10), (None, 1, 20)], hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type + model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type ) hls_model.compile() diff --git a/test/pytest/test_sequential_parsing_pytorch.py b/test/pytest/test_sequential_parsing_pytorch.py index 569c6a5b1c..20b273400a 100644 --- a/test/pytest/test_sequential_parsing_pytorch.py +++ b/test/pytest/test_sequential_parsing_pytorch.py @@ -59,12 +59,10 @@ def test_unnamed_seq(backend, io_type, named_layers): model = seq_named else: model = seq_unnamed - config = config_from_pytorch_model(model) + config = config_from_pytorch_model(model, (1, 5, 5)) output_dir = str(test_root_path / f'hls4mlprj_pytorch_seq_unnamed_{backend}_{io_type}_{named_layers}') - convert_from_pytorch_model( - model, (None, 1, 5, 5), hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type - ) + convert_from_pytorch_model(model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type) @pytest.mark.parametrize('backend', ['Vivado']) @@ -75,9 +73,7 @@ 
def test_named_seq(backend, io_type, named_layers): model = SeqModelNamedLayers() else: model = SeqModelUnnamedLayers() - config = config_from_pytorch_model(model) + config = config_from_pytorch_model(model, (1, 5, 5)) output_dir = str(test_root_path / f'hls4mlprj_pytorch_seq_named_{backend}_{io_type}_{named_layers}') - convert_from_pytorch_model( - model, (None, 1, 5, 5), hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type - ) + convert_from_pytorch_model(model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type) diff --git a/test/pytest/test_upsampling_pytorch.py b/test/pytest/test_upsampling_pytorch.py index e881c39bbf..6e0d8f78ad 100644 --- a/test/pytest/test_upsampling_pytorch.py +++ b/test/pytest/test_upsampling_pytorch.py @@ -55,13 +55,14 @@ def test_pytorch_upsampling1d(data_1d, io_type, backend): config = hls4ml.utils.config_from_pytorch_model( model, + (None, in_feat, in_width), default_precision='ap_fixed<16,6>', channels_last_conversion="internal", transpose_outputs=False, ) odir = str(test_root_path / f'hls4mlprj_pytorch_upsampling_1d_{backend}_{io_type}') hls_model = hls4ml.converters.convert_from_pytorch_model( - model, (None, in_feat, in_width), hls_config=config, io_type=io_type, output_dir=odir, backend=backend + model, hls_config=config, io_type=io_type, output_dir=odir, backend=backend ) hls_model.compile() @@ -84,13 +85,14 @@ def test_pytorch_upsampling2d(data_2d, io_type, backend): config = hls4ml.utils.config_from_pytorch_model( model, + (in_feat, in_height, in_width), default_precision='ap_fixed<16,6>', channels_last_conversion="full", # With conversion to channels_last transpose_outputs=True, ) odir = str(test_root_path / f'hls4mlprj_pytorch_upsampling_2d_{backend}_{io_type}') hls_model = hls4ml.converters.convert_from_pytorch_model( - model, (None, in_feat, in_height, in_width), hls_config=config, io_type=io_type, output_dir=odir, backend=backend + model, hls_config=config, io_type=io_type, output_dir=odir, backend=backend ) hls_model.compile()
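
Example of the call pattern after this patch (a minimal usage sketch, not part of the patch itself; the two-layer model, input shape, output directory, and backend below are illustrative placeholders):

import torch

import hls4ml

model = torch.nn.Sequential(torch.nn.Linear(8, 4), torch.nn.ReLU())
model.eval()

# The input shape is now passed to config_from_pytorch_model: a tuple for a
# single input or a list of tuples for multiple inputs, excluding the batch
# size (the parser inserts the 'None' batch-size placeholder itself).
config = hls4ml.utils.config_from_pytorch_model(
    model,
    (8,),
    granularity='name',  # 'model', 'type' and 'name' are the accepted values
    default_precision='ap_fixed<16,6>',
)
print(config['LayerName'].keys())  # per-layer entries created by granularity='name'

# convert_from_pytorch_model no longer takes input_shape; it is read from the
# config created above.
hls_model = hls4ml.converters.convert_from_pytorch_model(
    model,
    hls_config=config,
    output_dir='my-hls-test',
    backend='Vivado',
)
hls_model.compile()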