From 19b25876924f54c5aeadb18afc61e282d5dc46cc Mon Sep 17 00:00:00 2001 From: Connor Holmes Date: Thu, 9 Nov 2023 18:34:44 -0800 Subject: [PATCH 1/6] Inference Checkpoints (#4620) Co-authored-by: Jeff Rasley Co-authored-by: Michael Wyatt Co-authored-by: Ammar Ahmad Awan Co-authored-by: Masahiro Tanaka Co-authored-by: Logan Adams <114770087+loadams@users.noreply.github.com> Co-authored-by: Reza Yazdani <44502768+RezaYazdaniAminabadi@users.noreply.github.com> Co-authored-by: Reza Yazdani --- .github/workflows/nv-accelerate-v100.yml | 1 + .github/workflows/nv-inference.yml | 1 + .github/workflows/nv-lightning-v100.yml | 1 + .github/workflows/nv-megatron.yml | 1 + .github/workflows/nv-pre-compile-ops.yml | 1 + .github/workflows/nv-torch-latest-cpu.yml | 1 + .github/workflows/nv-torch-latest-v100.yml | 1 + .github/workflows/nv-transformers-v100.yml | 1 + deepspeed/inference/__init__.py | 2 +- deepspeed/inference/v2/__init__.py | 2 +- deepspeed/inference/v2/allocator.py | 2 +- .../v2/checkpoint/huggingface_engine.py | 2 + deepspeed/inference/v2/engine_factory.py | 107 +++++-- deepspeed/inference/v2/engine_v2.py | 32 +- deepspeed/inference/v2/inference_parameter.py | 89 ++++++ .../v2/model_implementations/__init__.py | 5 + .../common_architectures}/__init__.py | 0 .../common_parameters/embedding_parameters.py | 5 +- .../common_parameters/invfreq_parameters.py | 2 - .../common_parameters/moe_parameters.py | 2 - .../common_parameters/qkv_parameters.py | 1 - .../flat_model_helpers.py | 275 ++++++++++++++++ .../inference_model_base.py | 73 ++++- .../inference_policy_base.py | 79 ++++- .../inference_transformer_base.py | 57 +--- .../layer_container_base.py | 45 ++- .../llama_v2/__init__.py | 2 + .../llama_v2/llama_v2_containers.py | 2 - .../llama_v2/llama_v2_policy.py | 6 - .../model_implementations/mistral/__init__.py | 2 + .../model_implementations/mistral/policy.py | 6 - .../v2/model_implementations/opt/__init__.py | 2 + .../v2/model_implementations/opt/policy.py | 6 - .../model_implementations/parameter_base.py | 2 - .../implementations/linear/__init__.py | 1 - .../implementations/linear/blas_fp_linear.py | 6 +- .../linear/cutlass_fp_linear.py | 81 ----- .../implementations/moe/cutlass_multi_gemm.py | 20 +- .../v2/modules/implementations/moe/gate_fn.py | 62 ---- .../v2/modules/implementations/moe/test.py | 38 --- .../implementations/post_norm/cuda_post_ln.py | 6 +- .../implementations/pre_norm/cuda_pre_ln.py | 6 +- .../implementations/pre_norm/cuda_pre_rms.py | 6 +- .../v2/modules/interfaces/embedding_base.py | 5 +- .../v2/modules/interfaces/linear_base.py | 3 +- .../v2/modules/interfaces/moe_base.py | 7 +- .../v2/modules/interfaces/post_norm_base.py | 3 +- .../v2/modules/interfaces/pre_norm_base.py | 3 +- .../inference/v2/ragged/csrc/ragged_ops.cpp | 31 ++ .../inference/kernels/core_ops/__init__.py | 4 - .../kernels/core_ops/test_bias_activation.py | 101 ------ .../kernels/core_ops/test_blas_linear.py | 73 ----- .../kernels/core_ops/test_gated_activation.py | 133 -------- .../kernels/core_ops/test_post_ln.py | 47 --- .../inference/kernels/core_ops/test_pre_ln.py | 51 --- .../kernels/core_ops/test_rms_norm.py | 77 ----- .../inference/kernels/cutlass_ops/__init__.py | 4 - .../kernels/cutlass_ops/test_moe_gemm.py | 113 ------- .../inference/kernels/ragged_ops/__init__.py | 4 - .../ragged_ops/ragged_testing_utils.py | 300 ------------------ .../kernels/ragged_ops/test_atom_builder.py | 45 --- .../kernels/ragged_ops/test_blocked_flash.py | 197 ------------ .../ragged_ops/test_blocked_kv_copy.py | 112 
------- .../ragged_ops/test_blocked_rotary_emb.py | 203 ------------ .../kernels/ragged_ops/test_logits_gather.py | 96 ------ .../kernels/ragged_ops/test_moe_gather.py | 83 ----- .../kernels/ragged_ops/test_moe_scatter.py | 74 ----- .../kernels/ragged_ops/test_ragged_embed.py | 177 ----------- .../kernels/ragged_ops/test_top_1_gating.py | 120 ------- .../model_implementations/__init__.py | 4 - .../parameters/__init__.py | 4 - .../parameters/test_layer_inheritance.py | 50 --- .../parameters/test_mapping.py | 165 ---------- .../parameters/test_multi_parameter_layer.py | 111 ------- .../parameters/test_parameter_list.py | 104 ------ .../model_implementations/parameters/utils.py | 58 ---- .../sharding/__init__.py | 4 - .../sharding/test_attn_out_sharding.py | 129 -------- .../sharding/test_mlp_sharding.py | 116 ------- .../sharding/test_qkv_sharding.py | 251 --------------- tests/unit/inference/modules/__init__.py | 4 - .../modules/test_blas_linear_module.py | 111 ------- .../inference/modules/test_blocked_attn.py | 210 ------------ .../modules/test_cuda_pre_ln_module.py | 88 ----- .../inference/modules/test_custom_module.py | 76 ----- .../inference/modules/test_cutlass_moe.py | 214 ------------- .../inference/modules/test_post_ln_module.py | 58 ---- .../inference/modules/test_pre_rms_module.py | 88 ----- tests/unit/inference/ragged/__init__.py | 4 - .../ragged/test_blocked_allocator.py | 166 ---------- .../inference/ragged/test_manager_configs.py | 58 ---- .../inference/ragged/test_ragged_wrapper.py | 112 ------- .../parameters/test_contiguify.py | 120 +++++++ .../parameters/test_layer_inheritance.py | 12 +- .../parameters/test_mapping.py | 27 +- .../parameters/test_multi_parameter_layer.py | 58 +--- .../parameters/test_parameter_list.py | 5 +- .../model_implementations/parameters/utils.py | 6 +- 98 files changed, 924 insertions(+), 4597 deletions(-) create mode 100644 deepspeed/inference/v2/inference_parameter.py rename {tests/unit/inference/kernels => deepspeed/inference/v2/model_implementations/common_architectures}/__init__.py (100%) create mode 100644 deepspeed/inference/v2/model_implementations/flat_model_helpers.py delete mode 100644 deepspeed/inference/v2/modules/implementations/linear/cutlass_fp_linear.py delete mode 100644 deepspeed/inference/v2/modules/implementations/moe/gate_fn.py delete mode 100644 deepspeed/inference/v2/modules/implementations/moe/test.py delete mode 100644 tests/unit/inference/kernels/core_ops/__init__.py delete mode 100644 tests/unit/inference/kernels/core_ops/test_bias_activation.py delete mode 100644 tests/unit/inference/kernels/core_ops/test_blas_linear.py delete mode 100644 tests/unit/inference/kernels/core_ops/test_gated_activation.py delete mode 100644 tests/unit/inference/kernels/core_ops/test_post_ln.py delete mode 100644 tests/unit/inference/kernels/core_ops/test_pre_ln.py delete mode 100644 tests/unit/inference/kernels/core_ops/test_rms_norm.py delete mode 100644 tests/unit/inference/kernels/cutlass_ops/__init__.py delete mode 100644 tests/unit/inference/kernels/cutlass_ops/test_moe_gemm.py delete mode 100644 tests/unit/inference/kernels/ragged_ops/__init__.py delete mode 100644 tests/unit/inference/kernels/ragged_ops/ragged_testing_utils.py delete mode 100644 tests/unit/inference/kernels/ragged_ops/test_atom_builder.py delete mode 100644 tests/unit/inference/kernels/ragged_ops/test_blocked_flash.py delete mode 100644 tests/unit/inference/kernels/ragged_ops/test_blocked_kv_copy.py delete mode 100644 
tests/unit/inference/kernels/ragged_ops/test_blocked_rotary_emb.py delete mode 100644 tests/unit/inference/kernels/ragged_ops/test_logits_gather.py delete mode 100644 tests/unit/inference/kernels/ragged_ops/test_moe_gather.py delete mode 100644 tests/unit/inference/kernels/ragged_ops/test_moe_scatter.py delete mode 100644 tests/unit/inference/kernels/ragged_ops/test_ragged_embed.py delete mode 100644 tests/unit/inference/kernels/ragged_ops/test_top_1_gating.py delete mode 100644 tests/unit/inference/model_implementations/__init__.py delete mode 100644 tests/unit/inference/model_implementations/parameters/__init__.py delete mode 100644 tests/unit/inference/model_implementations/parameters/test_layer_inheritance.py delete mode 100644 tests/unit/inference/model_implementations/parameters/test_mapping.py delete mode 100644 tests/unit/inference/model_implementations/parameters/test_multi_parameter_layer.py delete mode 100644 tests/unit/inference/model_implementations/parameters/test_parameter_list.py delete mode 100644 tests/unit/inference/model_implementations/parameters/utils.py delete mode 100644 tests/unit/inference/model_implementations/sharding/__init__.py delete mode 100644 tests/unit/inference/model_implementations/sharding/test_attn_out_sharding.py delete mode 100644 tests/unit/inference/model_implementations/sharding/test_mlp_sharding.py delete mode 100644 tests/unit/inference/model_implementations/sharding/test_qkv_sharding.py delete mode 100644 tests/unit/inference/modules/__init__.py delete mode 100644 tests/unit/inference/modules/test_blas_linear_module.py delete mode 100644 tests/unit/inference/modules/test_blocked_attn.py delete mode 100644 tests/unit/inference/modules/test_cuda_pre_ln_module.py delete mode 100644 tests/unit/inference/modules/test_custom_module.py delete mode 100644 tests/unit/inference/modules/test_cutlass_moe.py delete mode 100644 tests/unit/inference/modules/test_post_ln_module.py delete mode 100644 tests/unit/inference/modules/test_pre_rms_module.py delete mode 100644 tests/unit/inference/ragged/__init__.py delete mode 100644 tests/unit/inference/ragged/test_blocked_allocator.py delete mode 100644 tests/unit/inference/ragged/test_manager_configs.py delete mode 100644 tests/unit/inference/ragged/test_ragged_wrapper.py create mode 100644 tests/unit/inference/v2/model_implementations/parameters/test_contiguify.py diff --git a/.github/workflows/nv-accelerate-v100.yml b/.github/workflows/nv-accelerate-v100.yml index 4525a8124dc2..0f6491e08336 100644 --- a/.github/workflows/nv-accelerate-v100.yml +++ b/.github/workflows/nv-accelerate-v100.yml @@ -6,6 +6,7 @@ on: - 'docs/**' - 'blogs/**' - 'deepspeed/inference/v2/**' + - "tests/unit/inference/v2/**" merge_group: branches: [ master ] schedule: diff --git a/.github/workflows/nv-inference.yml b/.github/workflows/nv-inference.yml index e9c63051cbdf..f20b4496b6df 100644 --- a/.github/workflows/nv-inference.yml +++ b/.github/workflows/nv-inference.yml @@ -6,6 +6,7 @@ on: - 'docs/**' - 'blogs/**' - 'deepspeed/inference/v2/**' + - "tests/unit/inference/v2/**" merge_group: branches: [ master ] schedule: diff --git a/.github/workflows/nv-lightning-v100.yml b/.github/workflows/nv-lightning-v100.yml index b2b900e186f8..d25d40aef967 100644 --- a/.github/workflows/nv-lightning-v100.yml +++ b/.github/workflows/nv-lightning-v100.yml @@ -6,6 +6,7 @@ on: - 'docs/**' - 'blogs/**' - 'deepspeed/inference/v2/**' + - "tests/unit/inference/v2/**" merge_group: branches: [ master ] schedule: diff --git a/.github/workflows/nv-megatron.yml 
b/.github/workflows/nv-megatron.yml index 7bd29bb14e07..3a3b70dcd17d 100644 --- a/.github/workflows/nv-megatron.yml +++ b/.github/workflows/nv-megatron.yml @@ -6,6 +6,7 @@ on: - 'docs/**' - 'blogs/**' - 'deepspeed/inference/v2/**' + - "tests/unit/inference/v2/**" merge_group: branches: [ master ] schedule: diff --git a/.github/workflows/nv-pre-compile-ops.yml b/.github/workflows/nv-pre-compile-ops.yml index 79bc36dac7ab..839312190d22 100644 --- a/.github/workflows/nv-pre-compile-ops.yml +++ b/.github/workflows/nv-pre-compile-ops.yml @@ -8,6 +8,7 @@ on: - 'docs/**' - 'blogs/**' - 'deepspeed/inference/v2/**' + - "tests/unit/inference/v2/**" merge_group: branches: [ master ] schedule: diff --git a/.github/workflows/nv-torch-latest-cpu.yml b/.github/workflows/nv-torch-latest-cpu.yml index b62d30e3621b..9ca1529d9018 100644 --- a/.github/workflows/nv-torch-latest-cpu.yml +++ b/.github/workflows/nv-torch-latest-cpu.yml @@ -6,6 +6,7 @@ on: - 'docs/**' - 'blogs/**' - 'deepspeed/inference/v2/**' + - "tests/unit/inference/v2/**" merge_group: branches: [ master ] schedule: diff --git a/.github/workflows/nv-torch-latest-v100.yml b/.github/workflows/nv-torch-latest-v100.yml index 2d396b79b14a..8813a4bb2c4f 100644 --- a/.github/workflows/nv-torch-latest-v100.yml +++ b/.github/workflows/nv-torch-latest-v100.yml @@ -6,6 +6,7 @@ on: - 'docs/**' - 'blogs/**' - 'deepspeed/inference/v2/**' + - "tests/unit/inference/v2/**" merge_group: branches: [ master ] schedule: diff --git a/.github/workflows/nv-transformers-v100.yml b/.github/workflows/nv-transformers-v100.yml index 4ac66edb1e93..7753133f2886 100644 --- a/.github/workflows/nv-transformers-v100.yml +++ b/.github/workflows/nv-transformers-v100.yml @@ -6,6 +6,7 @@ on: - 'docs/**' - 'blogs/**' - 'deepspeed/inference/v2/**' + - "tests/unit/inference/v2/**" merge_group: branches: [ master ] schedule: diff --git a/deepspeed/inference/__init__.py b/deepspeed/inference/__init__.py index 7fed50cbe177..0ee72fa36975 100644 --- a/deepspeed/inference/__init__.py +++ b/deepspeed/inference/__init__.py @@ -4,4 +4,4 @@ # DeepSpeed Team from .v2 import RaggedInferenceEngineConfig, DeepSpeedTPConfig from .v2.engine_v2 import InferenceEngineV2 -from .v2 import build_hf_engine +from .v2 import build_hf_engine, buid_engine_from_ds_checkpoint diff --git a/deepspeed/inference/v2/__init__.py b/deepspeed/inference/v2/__init__.py index bba5d1a82081..c7b91db08462 100644 --- a/deepspeed/inference/v2/__init__.py +++ b/deepspeed/inference/v2/__init__.py @@ -4,4 +4,4 @@ # DeepSpeed Team from .config_v2 import RaggedInferenceEngineConfig, DeepSpeedTPConfig from .engine_v2 import InferenceEngineV2 -from .engine_factory import build_hf_engine +from .engine_factory import build_hf_engine, buid_engine_from_ds_checkpoint diff --git a/deepspeed/inference/v2/allocator.py b/deepspeed/inference/v2/allocator.py index fa2c5368604e..bebdcf83aee3 100644 --- a/deepspeed/inference/v2/allocator.py +++ b/deepspeed/inference/v2/allocator.py @@ -26,7 +26,7 @@ def on_device(method) -> torch.Tensor: def wrapped(self, *args, **kwargs): tensor = method(self, *args, **kwargs) if isinstance(tensor, torch.Tensor): - return tensor.to(get_accelerator().current_device()).contiguous() + return tensor.to(get_accelerator().current_device()) return tensor return wrapped diff --git a/deepspeed/inference/v2/checkpoint/huggingface_engine.py b/deepspeed/inference/v2/checkpoint/huggingface_engine.py index 515378d31d02..029e3f7774c0 100644 --- a/deepspeed/inference/v2/checkpoint/huggingface_engine.py +++ 
b/deepspeed/inference/v2/checkpoint/huggingface_engine.py @@ -90,6 +90,8 @@ def parameters(self) -> Iterable[Tuple[str, torch.Tensor]]: param = checkpoint_sd[param_name] yield param_name, param + del checkpoint_sd + if __name__ == "__main__": # To test, add your auth_token here and run `python huggingface_engine.py` diff --git a/deepspeed/inference/v2/engine_factory.py b/deepspeed/inference/v2/engine_factory.py index 48274d6c3d53..368c433aa9c1 100644 --- a/deepspeed/inference/v2/engine_factory.py +++ b/deepspeed/inference/v2/engine_factory.py @@ -3,44 +3,99 @@ # DeepSpeed Team +import json import logging -from typing import Any +import os +import pickle from .engine_v2 import InferenceEngineV2 from .config_v2 import RaggedInferenceEngineConfig from .checkpoint import HuggingFaceCheckpointEngine from .logging import inference_logger +from .model_implementations import ( + OPTPolicy, + Llama2Policy, + MistralPolicy, +) +from .model_implementations.inference_policy_base import POLICIES, InferenceV2Policy +from .model_implementations.flat_model_helpers import make_metadata_filename, ModelMetadata + + +def buid_engine_from_ds_checkpoint(path: str, + engine_config: RaggedInferenceEngineConfig, + debug_level: int = logging.INFO) -> InferenceEngineV2: + """ + Creates an engine from a checkpoint saved by ``InferenceEngineV2``. + + Arguments: + path: Path to the checkpoint. This does not need to point to any files in particular, + just the directory containing the checkpoint. + engine_config: Engine configuration. See ``RaggedInferenceEngineConfig`` for details. + debug_level: Logging level to use. Unless you are actively seeing issues, the recommended + value is ``logging.INFO``. + + Returns: + Fully initialized inference engine ready to serve queries. + """ + + inference_logger(level=debug_level) + # Load metadata, for grabbing the policy name we'll have all ranks just check for + # rank 0. + metadata_filename = make_metadata_filename(path, 0, engine_config.tensor_parallel.tp_size) + metadata = json.load(open(metadata_filename, "r")) + metadata = ModelMetadata.parse_raw(metadata) + + # Get the policy + try: + policy_cls: InferenceV2Policy = POLICIES[metadata.policy] + except KeyError: + raise ValueError(f"Unknown policy {metadata.policy} for model {path}") + + # Load the model config + model_config = pickle.load(open(os.path.join(path, "ds_model_config.pkl"), "rb")) + policy = policy_cls(model_config, inf_checkpoint_path=path) + + return InferenceEngineV2(policy, engine_config) def build_hf_engine(path: str, engine_config: RaggedInferenceEngineConfig, - debug_level: int = logging.INFO, - random_weights_config: Any = None, - fill_random: bool = False) -> InferenceEngineV2: + debug_level: int = logging.INFO) -> InferenceEngineV2: """ - Build an InferenceV2 engine for HuggingFace models. + Build an InferenceV2 engine for HuggingFace models. This can accept both a HuggingFace + model name or a path to an Inference-V2 checkpoint. + + Arguments: + path: Path to the checkpoint. This does not need to point to any files in particular, + just the directory containing the checkpoint. + engine_config: Engine configuration. See ``RaggedInferenceEngineConfig`` for details. + debug_level: Logging level to use. Unless you are actively seeing issues, the recommended + value is ``logging.INFO``. + + Returns: + Fully initialized inference engine ready to serve queries. 
""" - # Set up logging - inference_logger(level=debug_level) - # get HF checkpoint engine - checkpoint_engine = HuggingFaceCheckpointEngine(path) - - # get model config from HF AutoConfig - model_config = checkpoint_engine.model_config - - # get the policy - # TODO: generalize this to other models - if model_config.model_type == "opt": - from .model_implementations.opt.policy import OPTPolicy - policy = OPTPolicy(checkpoint_engine, model_config) - elif model_config.model_type == "llama": - from .model_implementations.llama_v2.llama_v2_policy import Llama2Policy - policy = Llama2Policy(checkpoint_engine, model_config) - elif model_config.model_type == "mistral": - from .model_implementations.mistral.policy import MistralPolicy - policy = MistralPolicy(checkpoint_engine, model_config) + if os.path.exists(os.path.join(path, "ds_model_config.pkl")): + return buid_engine_from_ds_checkpoint(path, engine_config, debug_level=debug_level) else: - raise ValueError(f"Unsupported model type {model_config.model_type}") + # Set up logging + inference_logger(level=debug_level) + # get HF checkpoint engine + checkpoint_engine = HuggingFaceCheckpointEngine(path) - return InferenceEngineV2(policy, engine_config) + # get model config from HF AutoConfig + model_config = checkpoint_engine.model_config + + # get the policy + # TODO: generalize this to other models + if model_config.model_type == "opt": + policy = OPTPolicy(model_config, checkpoint_engine=checkpoint_engine) + elif model_config.model_type == "llama": + policy = Llama2Policy(model_config, checkpoint_engine=checkpoint_engine) + elif model_config.model_type == "mistral": + policy = MistralPolicy(model_config, checkpoint_engine=checkpoint_engine) + else: + raise ValueError(f"Unsupported model type {model_config.model_type}") + + return InferenceEngineV2(policy, engine_config) diff --git a/deepspeed/inference/v2/engine_v2.py b/deepspeed/inference/v2/engine_v2.py index c670ddd09417..2c28262dae7b 100644 --- a/deepspeed/inference/v2/engine_v2.py +++ b/deepspeed/inference/v2/engine_v2.py @@ -4,6 +4,8 @@ # DeepSpeed Team import os +import json +import pickle from typing import Iterable, Tuple import torch @@ -17,6 +19,8 @@ from .logging import inference_logger from .ragged import DSStateManager, RaggedBatchWrapper, PlaceholderSequenceDescriptor from .scheduling_utils import SchedulingError, SchedulingResult +from .model_implementations.flat_model_helpers import make_param_filename, make_metadata_filename +from .model_implementations.inference_model_base import DSInferenceModelBase from .config_v2 import RaggedInferenceEngineConfig @@ -30,7 +34,7 @@ class InferenceEngineV2: Configuration of the inference engine. """ - #_model: DSInferenceModelBase + _model: DSInferenceModelBase """ Inference model supporting ragged inference. """ @@ -47,6 +51,13 @@ def free_blocks(self) -> int: """ return self._state_manager.free_blocks + @property + def model(self) -> DSInferenceModelBase: + """ + The model implementation. + """ + return self._model + def __init__(self, policy: InferenceV2Policy, engine_config: RaggedInferenceEngineConfig) -> None: """ Create the Inference V2 engine. @@ -215,3 +226,22 @@ def flush(self, uid: int) -> None: uid (int): The UID of the sequence to flush. """ self._state_manager.flush_sequence(uid) + + def serialize(self, save_path: str) -> None: + """ + Serialize the model to a file. + + Arguments: + path (str): Path to the file to serialize to. 
+ """ + param_file_name = make_param_filename(save_path, self._model.tp_rank, self._model.tp_size) + metadata_file_name = make_metadata_filename(save_path, self._model.tp_rank, self._model.tp_size) + + # Save the flattened parameters + + torch.save(self._model.flattened_params, param_file_name) + + json.dump(self._model.flattened_param_metadata.json(), open(metadata_file_name, "w")) + + if self._model.tp_rank == 0: + pickle.dump(self._model._config, open(os.path.join(save_path, "ds_model_config.pkl"), "wb")) diff --git a/deepspeed/inference/v2/inference_parameter.py b/deepspeed/inference/v2/inference_parameter.py new file mode 100644 index 000000000000..4dcff16a4515 --- /dev/null +++ b/deepspeed/inference/v2/inference_parameter.py @@ -0,0 +1,89 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Dict + +import torch + +CORE_PARAM = "_ds_core_param_key" + +STR_TO_DTYPE = { + "torch.float32": torch.float32, + "torch.float64": torch.float64, + "torch.float16": torch.float16, + "torch.bfloat16": torch.bfloat16, + "torch.int64": torch.int64, + "torch.int32": torch.int32, + "torch.int16": torch.int16, + "torch.int8": torch.int8, + "torch.uint8": torch.uint8, + "torch.bool": torch.bool, +} + + +class InferenceParameter(torch.Tensor): + """ + An extension of the torch.Tensor class to support our inference focused features. One important + thing to note here is that an InferenceParam can be used a torch.Tensor, but outputs of + torch.Tensor operations will not be InferenceParams. + """ + + @staticmethod + def __new__(cls, tensor, *args, **kwargs): + new_tensor = super().__new__(cls, tensor, *args, **kwargs) + if hasattr(tensor, "_aux_attrs"): + setattr(new_tensor, "_aux_attrs", tensor.aux_attrs) + return new_tensor + + def to(self, *args, **kwargs): + new_tensor = super().to(*args, **kwargs) + if hasattr(self, "_aux_attrs"): + setattr(new_tensor, "_aux_attrs", self.aux_attrs) + try: + _ = torch.device(args[0]) + for name, attr in new_tensor.aux_attrs.items(): + new_attr = attr.to(*args, **kwargs) + setattr(new_tensor, name, new_attr) + new_tensor.aux_attrs[name] = new_attr + except: + pass + + return new_tensor + + @classmethod + def initialize(cls, core_param: torch.Tensor, **kwargs) -> 'InferenceParameter': + """ + Create the inference parameter. + """ + param = InferenceParameter(core_param) + setattr(param, "_aux_attrs", kwargs) + + for attr_name, attr in kwargs.items(): + if hasattr(param, attr_name): + raise ValueError(f"Attribute {attr_name} already exists on param.") + + if not isinstance(attr, torch.Tensor): + raise ValueError(f"Attribute {attr_name} must be a tensor.") + + setattr(param, attr_name, attr) + + return param + + @classmethod + def initialize_raw(self, **kwargs) -> 'InferenceParameter': + """ + All kwargs must be torch.Tensors and must include the core parameter. + """ + if CORE_PARAM not in kwargs: + raise ValueError(f"Must provide core parameter, with key {CORE_PARAM}.") + + return InferenceParameter.initialize(kwargs[CORE_PARAM], **kwargs) + + @property + def aux_attrs(self) -> Dict[str, torch.Tensor]: + """ + Dictionary of auxiliary attributes. 
+ """ + return self._aux_attrs diff --git a/deepspeed/inference/v2/model_implementations/__init__.py b/deepspeed/inference/v2/model_implementations/__init__.py index a3481023a8fd..dae406271245 100644 --- a/deepspeed/inference/v2/model_implementations/__init__.py +++ b/deepspeed/inference/v2/model_implementations/__init__.py @@ -7,3 +7,8 @@ from .inference_transformer_base import DSTransformerModelBase, DSMoETransformerModelBase from .inference_policy_base import InferenceV2Policy, ContainerMap from .sharding import * + +# Model Implementations +from .llama_v2 import * +from .opt import * +from .mistral import * diff --git a/tests/unit/inference/kernels/__init__.py b/deepspeed/inference/v2/model_implementations/common_architectures/__init__.py similarity index 100% rename from tests/unit/inference/kernels/__init__.py rename to deepspeed/inference/v2/model_implementations/common_architectures/__init__.py diff --git a/deepspeed/inference/v2/model_implementations/common_parameters/embedding_parameters.py b/deepspeed/inference/v2/model_implementations/common_parameters/embedding_parameters.py index 4babc0ee0127..2ed34b5fd259 100644 --- a/deepspeed/inference/v2/model_implementations/common_parameters/embedding_parameters.py +++ b/deepspeed/inference/v2/model_implementations/common_parameters/embedding_parameters.py @@ -6,7 +6,6 @@ import torch from ...model_implementations.parameter_base import ParameterBase -from ...allocator import on_device """ Embedding containers. """ @@ -23,7 +22,5 @@ class EmbeddingParameter(ParameterBase): Vocabulary parameter of shape [vocab_size, model_dim]. """ - @on_device def finalize(self) -> torch.Tensor: - return self.params - #return self.inference_model.transform_embed_param(self.params) + return self.inference_model.transform_embedding_param(self.params) diff --git a/deepspeed/inference/v2/model_implementations/common_parameters/invfreq_parameters.py b/deepspeed/inference/v2/model_implementations/common_parameters/invfreq_parameters.py index 3a5a7fb04b9a..163f9de81d98 100644 --- a/deepspeed/inference/v2/model_implementations/common_parameters/invfreq_parameters.py +++ b/deepspeed/inference/v2/model_implementations/common_parameters/invfreq_parameters.py @@ -6,7 +6,6 @@ import torch from ...model_implementations.parameter_base import ParameterBase -from ...allocator import on_device """ Common InvFreq Parameter Patterns """ @@ -16,6 +15,5 @@ class InvFreqParameter(ParameterBase): params: torch.Tensor - @on_device def finalize(self) -> torch.Tensor: return self.params.to(self.inference_model.activation_dtype.value) diff --git a/deepspeed/inference/v2/model_implementations/common_parameters/moe_parameters.py b/deepspeed/inference/v2/model_implementations/common_parameters/moe_parameters.py index ae95e628b779..df5f1427a5cf 100644 --- a/deepspeed/inference/v2/model_implementations/common_parameters/moe_parameters.py +++ b/deepspeed/inference/v2/model_implementations/common_parameters/moe_parameters.py @@ -5,7 +5,6 @@ import torch -from ...allocator import on_device from ...model_implementations.parameter_base import ParameterBase, ParamList """ Moe Parameters @@ -24,7 +23,6 @@ class MoEGatingWeightParameter(ParameterBase): Projection matrix from the input activations to the gate logits. 
""" - @on_device def finalize(self) -> torch.Tensor: return self.inference_model.transform_moe_gate_param(self.params) diff --git a/deepspeed/inference/v2/model_implementations/common_parameters/qkv_parameters.py b/deepspeed/inference/v2/model_implementations/common_parameters/qkv_parameters.py index 2ed8a8654f5b..e240137186fe 100644 --- a/deepspeed/inference/v2/model_implementations/common_parameters/qkv_parameters.py +++ b/deepspeed/inference/v2/model_implementations/common_parameters/qkv_parameters.py @@ -111,6 +111,5 @@ def finalize(self) -> torch.Tensor: head_size = self.inference_model.head_size n_q_heads = self.inference_model.n_heads_q n_kv_heads = self.inference_model.n_heads_kv - transposed_param = transform_gqa_megatron(self.params, head_size, n_q_heads, n_kv_heads) return self.inference_model.transform_qkv_param(transposed_param) diff --git a/deepspeed/inference/v2/model_implementations/flat_model_helpers.py b/deepspeed/inference/v2/model_implementations/flat_model_helpers.py new file mode 100644 index 000000000000..dbec911230f5 --- /dev/null +++ b/deepspeed/inference/v2/model_implementations/flat_model_helpers.py @@ -0,0 +1,275 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Dict, Iterable, Tuple, Optional +from os import path + +import torch + +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import RaggedUtilsBuilder +from deepspeed.runtime.config_utils import DeepSpeedConfigModel +from .layer_container_base import LayerContainer +from ..inference_parameter import InferenceParameter, STR_TO_DTYPE +from ..inference_utils import elem_size + + +class TensorMetadata(DeepSpeedConfigModel): + """ + A class to represent a tensor specification. + """ + dtype: Optional[str] + shape: Optional[Tuple[int, ...]] + strides: Optional[Tuple[int, ...]] + offset: int + + +class ParameterMetadata(DeepSpeedConfigModel): + """ + A class to represent a parameter specification. + """ + core_param: TensorMetadata = None + aux_params: Dict[str, TensorMetadata] = {} + + +class LayerMetadata(DeepSpeedConfigModel): + """ + A class to represent a layer specification. + """ + params: Dict[str, ParameterMetadata] = {} + + +class ModelMetadata(DeepSpeedConfigModel): + """ + A class to represent a model specification. + """ + policy: str = "" + layers: Dict[str, LayerMetadata] = {} + + +def make_param_filename(base: str, rank: int, n_ranks: int) -> str: + """ + Make a filename for a parameter file. + + Arguments: + rank: Rank of the file. + n_ranks: Total number of ranks. + + Returns: + str: Filename. + """ + return path.join(base, f"params_rank_{rank}_of_{n_ranks}.pt") + + +def make_metadata_filename(base: str, rank: int, n_ranks: int) -> str: + """ + Make a filename for a metadata file. + + Arguments: + rank: Rank of the file. + n_ranks: Total number of ranks. + + Returns: + str: Filename. + """ + return path.join(base, f"metadata_rank_{rank}_of_{n_ranks}.json") + + +def make_model_config_filename(base: str) -> str: + """ + Make a filename for a model config file. + + Arguments: + base: Base directory. + + Returns: + str: Filename. 
+ """ + return path.join(base, "ds_model_config.json") + + +def flatten_inference_model( + transformer_containers: Iterable[LayerContainer], + non_transformer_container: LayerContainer, + policy_name: str, +) -> Tuple[torch.Tensor, ModelMetadata]: + """ + Flatten the underlying parameters into + + Arguments: + transformer_containers: Iterable of layer containers corresponding to the transformer + parameters. + non_transformer_container: Layer container corresponding to the non-transformer parameters. + policy_name: The name of the policy class (typically accessed with `type(policy).__name__`). + + Returns: + Iterable[Any]: Flattened list of parameters. + """ + alloc_fn = RaggedUtilsBuilder().load().allocate_view_on + + total_size = 0 + metadata = ModelMetadata(policy=policy_name) + + def process_layer(layer_container: LayerContainer, l_name: str, cur_offset: int) -> int: + """ + Iterate over the parameters of a single container and collect metadata for the final + flattened buffer. + + Arguments: + layer_container: The layer container to process. + l_name: The name of the layer container to key the metadata. + cur_offset: The current offset into the flattened buffer. + + Captured Variables: + metadata: The metadata object to populate. + + Returns: + int: The updated offset into the flattened buffer. + """ + try: + _ = layer_container.is_populated + except ValueError as e: + raise ValueError(f"Layer container {l_name} is not populated.") from e + + layer_metadata = LayerMetadata() + + for p_name in layer_container.annotation_attrs: + param = getattr(layer_container, p_name) + param_metadata = ParameterMetadata() + + if param is None: + param_metadata.core_param = TensorMetadata(offset=-1) + layer_metadata.params[p_name] = param_metadata + continue + + param_metadata.core_param = TensorMetadata(dtype=str(param.dtype), + shape=param.shape, + strides=param.stride(), + offset=cur_offset) + + cur_offset += elem_size(param.dtype) * param.numel() + + for t_name, tensor in param.aux_attrs.items(): + param_metadata.aux_params[t_name] = TensorMetadata(dtype=str(tensor.dtype), + shape=tensor.shape, + strides=tensor.stride(), + offset=cur_offset) + + cur_offset += elem_size(param.dtype) * param.numel() + + layer_metadata.params[p_name] = param_metadata + + metadata.layers[l_name] = layer_metadata + return cur_offset + + for i, layer in enumerate(transformer_containers): + l_name = f"transformer_layer_{i}" + total_size = process_layer(layer, l_name, total_size) + + l_name = "non_transformer" + total_size = process_layer(non_transformer_container, l_name, total_size) + + buffer = torch.empty(total_size, dtype=torch.uint8, device=get_accelerator().current_device()) + + def copy_layer(layer_container: LayerContainer, l_name: str) -> None: + """ + Local method for copying from the layer container to the flattened buffer. + + Arguments: + layer_container: The layer container to copy from. + l_name: The name of the layer container to key the metadata. + + Captured Variables: + buffer: The flattened buffer to copy into. + metadata: The metadata object to populate. 
+ """ + l_metadata = metadata.layers[l_name] + for p_name in layer_container.annotation_attrs: + p_metadata = l_metadata.params[p_name] + param = getattr(layer_container, p_name) + + if param is None: + continue + + core_param = alloc_fn(param, buffer, p_metadata.core_param.offset) + core_param.copy_(param) + + aux_params = {} + + for t_name, tensor in param.aux_attrs.items(): + t_view = alloc_fn(tensor, buffer, p_metadata.aux_params[t_name].offset) + aux_params[t_name] = t_view + t_view.copy_(tensor) + + setattr(layer_container, p_name, InferenceParameter.initialize(core_param, **aux_params)) + + for i, layer in enumerate(transformer_containers): + l_name = f"transformer_layer_{i}" + copy_layer(layer, l_name) + + l_name = "non_transformer" + copy_layer(non_transformer_container, l_name) + + return buffer, metadata + + +def restore_inference_model(buffer: torch.Tensor, metadata: ModelMetadata, + transformer_containers: Iterable[LayerContainer], + non_transformer_container: LayerContainer) -> None: + """ + Restore the model from the buffer and metadata. + + Arguments: + buffer: Buffer containing the model parameters. + metadata: Metadata for the model. + transformer_containers: Iterable of transformer layer containers. + non_transformer_container: Non-transformer layer container. + """ + alloc_fn = RaggedUtilsBuilder().load().allocate_view_like + + def restore_layer(layer_container: LayerContainer, l_name: str) -> None: + """ + Local method for restoring a layer container from a flattened buffer. This + only constructs views for the parameters onto the buffer. No data movement + is performed. + + Arguments: + layer_container: The layer container to restore. + l_name: The name of the layer container to key the metadata. + + Captured Variables: + buffer: The flattened buffer to reconstruct views on top of. + metadata: The metadata object describing the each parameter in the model. 
+ """ + l_metadata = metadata.layers[l_name] + + for p_name in layer_container.annotation_attrs: + p_metadata = l_metadata.params[p_name] + + if p_metadata.core_param.offset == -1: + layer_container.direct_injection(p_name, None) + continue + + dummy_tensor = torch.empty([], dtype=STR_TO_DTYPE[p_metadata.core_param.dtype]) + core_param = alloc_fn(p_metadata.core_param.shape, p_metadata.core_param.strides, dummy_tensor, buffer, + p_metadata.core_param.offset) + + aux_params = {} + + for t_name, t_metadata in p_metadata.aux_params.items(): + dummy_tensor = torch.empty([], dtype=STR_TO_DTYPE[t_metadata.dtype]) + t_view = alloc_fn(t_metadata.shape, t_metadata.strides, dummy_tensor, buffer, t_metadata.offset) + + aux_params[t_name] = t_view + + restored_param = InferenceParameter.initialize(core_param, **aux_params) + layer_container.direct_injection(p_name, restored_param) + + for i, layer in enumerate(transformer_containers): + l_name = f"transformer_layer_{i}" + restore_layer(layer, l_name) + + l_name = "non_transformer" + restore_layer(non_transformer_container, l_name) diff --git a/deepspeed/inference/v2/model_implementations/inference_model_base.py b/deepspeed/inference/v2/model_implementations/inference_model_base.py index bc6b28f8ce52..d0efd7b532eb 100644 --- a/deepspeed/inference/v2/model_implementations/inference_model_base.py +++ b/deepspeed/inference/v2/model_implementations/inference_model_base.py @@ -8,11 +8,22 @@ import torch +import deepspeed.comm as dist from ..ragged import DSStateManager, RaggedBatchWrapper from ..ragged.manager_configs import KVCacheConfig from ..ragged import DSSequenceDescriptor from ..model_implementations.layer_container_base import LayerContainer from ..config_v2 import RaggedInferenceEngineConfig +from .flat_model_helpers import ModelMetadata + +try: + from functools import cached_property +except ImportError: + + def cached_property(func): + return property(func) + + """ This abstract class defines the interfaces that a model implementation should implement in order to include anything that may be called by the engine. Most models should be able @@ -92,13 +103,25 @@ def __init__(self, config: DSModelImplementationConfig, engine_config: RaggedInf # Set to None until the Policy sets the model parameters self._non_transformer = None self._transformer = None + self._flattened_param_buffer = None + self._flattened_param_metadata = None + + @property + def config(self) -> DSModelImplementationConfig: + """ + The model config. + """ + return self._config - def set_parameters(self, transformer: Iterable[LayerContainer], non_transformer: LayerContainer): + def set_parameters(self, transformer: Iterable[LayerContainer], non_transformer: LayerContainer, + flattened_param_buffer: torch.Tensor, flattened_param_metadata: ModelMetadata): """ Set the model parameters for the embedding, transformer, and unembedding containers. """ self._transformer = transformer self._non_transformer = non_transformer + self._flattened_param_buffer = flattened_param_buffer + self._flattened_param_metadata = flattened_param_metadata def set_state_manager(self, state_manager: DSStateManager): """ @@ -107,6 +130,54 @@ def set_state_manager(self, state_manager: DSStateManager): """ self.state_manager = state_manager + @cached_property + def tp_rank(self) -> int: + """ + The rank of the current process. + + # TODO(cmikeh2): Kind of a hack right now, but this is too verbose to use at + the frequency we need. 
+ """ + return dist.get_rank(group=self._base_mp_group) + + @cached_property + def tp_size(self) -> int: + """ + The total number of processes. + + # TODO(cmikeh2): Kind of a hack right now, but this is too verbose to use at + the frequency we need. + """ + return dist.get_world_size(group=self._base_mp_group) + + @property + def model_config(self): + """ + The model config. + """ + return self._config + + @property + def engine_config(self): + """ + The engine config. + """ + return self._engine_config + + @property + def flattened_params(self) -> Optional[torch.Tensor]: + """ + The flattened parameter buffer. + """ + return self._flattened_param_buffer + + @property + def flattened_param_metadata(self) -> Optional[ModelMetadata]: + """ + The flattened parameter metadata. + """ + return self._flattened_param_metadata + @abstractmethod def get_kv_requirements(self, sequence: DSSequenceDescriptor, max_new_tokens: int, max_new_blocks: int) -> Tuple[int, int]: diff --git a/deepspeed/inference/v2/model_implementations/inference_policy_base.py b/deepspeed/inference/v2/model_implementations/inference_policy_base.py index f87c4f0c7cfc..d5a326c03599 100644 --- a/deepspeed/inference/v2/model_implementations/inference_policy_base.py +++ b/deepspeed/inference/v2/model_implementations/inference_policy_base.py @@ -3,14 +3,26 @@ # DeepSpeed Team -from abc import ABC, abstractmethod -from typing import Any, Iterable, List, Union +import json +from abc import ABC, ABCMeta, abstractmethod +from typing import Any, Iterable, List, Optional, Union + +import torch from ..config_v2 import RaggedInferenceEngineConfig from ..checkpoint import CheckpointEngineBase from ..logging import inference_logger from .layer_container_base import LayerContainer from .inference_model_base import DSInferenceModelBase +from .flat_model_helpers import ( + flatten_inference_model, + make_param_filename, + make_metadata_filename, + ModelMetadata, + restore_inference_model, +) + +POLICIES = {} class ContainerMap: @@ -80,15 +92,49 @@ def validate(self) -> None: f"Transformer container at index {layer_idx} not fully initialized after checkpoint load.") -class InferenceV2Policy(ABC): +class PolicyMeta(ABCMeta): + + def __new__(cls, name, bases, dct): + new_obj = super().__new__(cls, name, bases, dct) + if name != "InferenceV2Policy": + POLICIES[name] = new_obj + return new_obj + + +class InferenceV2Policy(ABC, metaclass=PolicyMeta): """ The InferenceV2Policy is the base class for all inference policies. An inference policy is responsible for instantiating the inference model and mapping the parameters from the checkpoint engine to the model itself. """ - def __init__(self, checkpoint_engine: CheckpointEngineBase, model_config: Any) -> None: + def __init__( + self, + model_config: Any, + checkpoint_engine: Optional[CheckpointEngineBase] = None, + inf_checkpoint_path: Optional[str] = None, + ) -> None: + """ + Create the Policy with sufficient context to build the model. There are two supported + model creation mechanisms. + + The first is the generalized ``checkpoint_engine`` which + will iterate over the parameters of the model and provide them to the policy. These in + turn will be sharded/transformed by the model implementation. + + The second is used to re-create a previously serialized DeepSpeed inference model. These + checkpoints should not be used across different model backend configurations. 
+ + TODO(cmikeh2): Enforce this in code + """ + if checkpoint_engine is None and inf_checkpoint_path is None: + raise ValueError("Either checkpoint_engine or ds_checkpoint_path must be provided.") + + if checkpoint_engine is not None and inf_checkpoint_path is not None: + raise ValueError("Only one of checkpoint_engine or ds_checkpoint_path can be provided.") + self._checkpoint_engine = checkpoint_engine + self._inf_checkpoint_path = inf_checkpoint_path self._model_config = model_config def build_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> DSInferenceModelBase: @@ -147,9 +193,28 @@ def populate_model_parameters(self) -> None: """ container_map = self.build_container_map() - for name, parameter in self._checkpoint_engine.parameters(): - container_map.map_param(name, parameter) + + if self._checkpoint_engine is not None: + for name, parameter in self._checkpoint_engine.parameters(): + container_map.map_param(name, parameter) + + buffer, metadata = flatten_inference_model(container_map.transformer_params, + container_map.non_transformer_params, self.__class__.__name__) + else: + + buffer_path = make_param_filename(self._inf_checkpoint_path, self.model.tp_rank, self.model.tp_size) + metadata_path = make_metadata_filename(self._inf_checkpoint_path, self.model.tp_rank, self.model.tp_size) + + buffer = torch.load(buffer_path) + metadata = json.load(open(metadata_path, "r")) + metadata = ModelMetadata.parse_raw(metadata) + + restore_inference_model(buffer, metadata, container_map.transformer_params, + container_map.non_transformer_params) + container_map.validate() self.model.set_parameters(transformer=container_map.transformer_params, - non_transformer=container_map.non_transformer_params) + non_transformer=container_map.non_transformer_params, + flattened_param_buffer=buffer, + flattened_param_metadata=metadata) diff --git a/deepspeed/inference/v2/model_implementations/inference_transformer_base.py b/deepspeed/inference/v2/model_implementations/inference_transformer_base.py index ce3a486373bc..6bf16703e446 100644 --- a/deepspeed/inference/v2/model_implementations/inference_transformer_base.py +++ b/deepspeed/inference/v2/model_implementations/inference_transformer_base.py @@ -8,9 +8,7 @@ import torch -import deepspeed.comm as dist from deepspeed.accelerator import get_accelerator -from ..allocator import on_device from ..config_v2 import RaggedInferenceEngineConfig from ..inference_utils import ActivationType, ceil_div, is_gated from ..model_implementations import * @@ -36,6 +34,7 @@ DSModelImplementationConfig, MPType, ) +from ..inference_parameter import InferenceParameter try: from functools import cached_property @@ -157,26 +156,6 @@ def norm_type(self) -> NormTypeEnum: Derived helpers """ - @cached_property - def tp_rank(self) -> int: - """ - The rank of the current process. - - # TODO(cmikeh2): Kind of a hack right now, but this is too verbose to use at - the frequency we need. - """ - return dist.get_rank(group=self._base_mp_group) - - @cached_property - def tp_size(self) -> int: - """ - The total number of processes. - - # TODO(cmikeh2): Kind of a hack right now, but this is too verbose to use at - the frequency we need. 
- """ - return dist.get_world_size(group=self._base_mp_group) - @cached_property def n_heads_q_local(self) -> int: """ @@ -248,13 +227,13 @@ def make_embedding_layer(self) -> None: self.embed = heuristics.instantiate_embed(embed_config, self._engine_config) - @on_device - def transform_embedding_param(self, param: torch.Tensor) -> torch.Tensor: + def transform_embedding_param(self, param: torch.Tensor) -> InferenceParameter: """ Performs embedding sharding along the channels dimension. """ # Until we can do non-contiguous all-gather, we won't shard the embedding parameters. - return param.to(self.activation_dtype.value) + param = param.to(self.activation_dtype.value) + return InferenceParameter.initialize(param) ######### Unembedding ######### def make_unembedding_layer(self) -> None: @@ -287,12 +266,12 @@ def make_unembedding_layer(self) -> None: device=get_accelerator().current_device(), dtype=self.activation_dtype.value) - @on_device - def transform_unembed_param(self, param: torch.Tensor) -> torch.Tensor: + def transform_unembed_param(self, param: torch.Tensor) -> InferenceParameter: """ Performs sharding along the vocab dimension. """ - return shard_unembed_param(param, self.tp_rank, self.tp_size).to(self.activation_dtype.value) + param = shard_unembed_param(param, self.tp_rank, self.tp_size).to(self.activation_dtype.value) + return InferenceParameter.initialize(param) ######### QKV ######### def make_qkv_layer(self) -> None: @@ -313,8 +292,7 @@ def make_qkv_layer(self) -> None: self.qkv = heuristics.instantiate_linear(linear_config, self._engine_config) - @on_device - def transform_qkv_param(self, param: torch.Tensor) -> torch.Tensor: + def transform_qkv_param(self, param: torch.Tensor) -> InferenceParameter: """ Passes a QKV parameter to the underlying implementation for any necessary transformations. @@ -422,8 +400,7 @@ def make_attn_out_layer(self) -> None: self.attn_out = heuristics.instantiate_linear(linear_config, self._engine_config) - @on_device - def transform_attn_out_param(self, param: torch.Tensor) -> Optional[torch.Tensor]: + def transform_attn_out_param(self, param: torch.Tensor) -> Optional[InferenceParameter]: """ Shards an attention output projection parameter and passes it to the underlying implementation for any necessary transformations. This will return `None` for bias parameters @@ -460,8 +437,7 @@ def make_mlp_1_layer(self) -> None: self.mlp_1 = heuristics.instantiate_linear(linear_config, self._engine_config) - @on_device - def transform_mlp_1_param(self, param: torch.Tensor) -> torch.Tensor: + def transform_mlp_1_param(self, param: torch.Tensor) -> InferenceParameter: """ Shards the first MLP parameter and passes it to the underlying implementation for any necessary transformations. @@ -491,8 +467,7 @@ def make_mlp_2_layer(self) -> None: self.mlp_2 = heuristics.instantiate_linear(linear_config, self._engine_config) - @on_device - def transform_mlp_2_param(self, param: torch.Tensor) -> Optional[torch.Tensor]: + def transform_mlp_2_param(self, param: torch.Tensor) -> Optional[InferenceParameter]: """ Shards the second MLP parameter and passes it to the underlying implementation for any necessary transformations. 
This will return `None` for bias parameters @@ -528,8 +503,7 @@ def make_norm_layer(self) -> None: self.norm = heuristics.instantiate_pre_norm(norm_config, self._engine_config) - @on_device - def transform_norm_param(self, param: torch.Tensor) -> torch.Tensor: + def transform_norm_param(self, param: torch.Tensor) -> InferenceParameter: """ Passes a normalization parameter to the underlying implementation for any necessary transformations. @@ -571,8 +545,7 @@ def make_moe_layer(self) -> None: self.moe = heuristics.instantiate_moe(moe_config, self._engine_config) - @on_device - def transform_moe_gate_param(self, param: torch.Tensor) -> torch.Tensor: + def transform_moe_gate_param(self, param: torch.Tensor) -> InferenceParameter: """ Passes a MoE gate parameter to the underlying implementation for any necessary transformations. @@ -580,8 +553,7 @@ def transform_moe_gate_param(self, param: torch.Tensor) -> torch.Tensor: """ return self.moe.transform_gate_param(param) - @on_device - def transform_moe_mlp_1_param(self, param: torch.Tensor) -> torch.Tensor: + def transform_moe_mlp_1_param(self, param: torch.Tensor) -> InferenceParameter: """ Shards the first MoE param and passes it to the underlying implementation. Since it's possible for an architecture to have both MoE and non-MoE layers, this can't be overloaded on the MLP1 transform. Furthermore, since both @@ -596,7 +568,6 @@ def transform_moe_mlp_1_param(self, param: torch.Tensor) -> torch.Tensor: return self.moe.transform_moe_mlp_1_param(param) - @on_device def transform_moe_mlp_2_param(self, param: torch.Tensor) -> Optional[torch.Tensor]: """ Shards the second MoE param and passes it to the underlying implementation. See the above for context on why this API diff --git a/deepspeed/inference/v2/model_implementations/layer_container_base.py b/deepspeed/inference/v2/model_implementations/layer_container_base.py index e0ec19372569..98e3e0bb31ed 100644 --- a/deepspeed/inference/v2/model_implementations/layer_container_base.py +++ b/deepspeed/inference/v2/model_implementations/layer_container_base.py @@ -9,6 +9,7 @@ from deepspeed.accelerator import get_accelerator from .parameter_base import ParameterBase, ParametrizedList +from ..inference_parameter import InferenceParameter # Currently have dependency loops for the type hints. InferenceModel = Type["InferenceModel"] @@ -199,8 +200,7 @@ def __init__(self, model: InferenceModel) -> None: self.inference_model = model self._finalized_params = 0 - @property - def is_initialized(self) -> bool: + def _initialization_checker(self, check_device: bool = True) -> bool: """ Returns whether or not all parameters have been initialized and transformed by the model. 
Once this returns True, all the `ParameterBase` instances will be @@ -213,14 +213,32 @@ def is_initialized(self) -> bool: tensor = getattr(self, name) if tensor is None: continue - elif not isinstance(tensor, torch.Tensor): - raise ValueError("Layer should be finalized, but {} is neither Tensor or None".format(name)) - elif tensor.device != torch.device(get_accelerator().current_device()): + elif not isinstance(tensor, InferenceParameter): + raise ValueError("Layer should be finalized, but {} ({}) is neither InferenceParameter or None".format( + name, type(tensor))) + elif check_device and tensor.device != torch.device(get_accelerator().current_device()): raise RuntimeError("Layer should be finalized, but {} is not on device {}".format( name, get_accelerator().current_device())) return True + @property + def is_populated(self) -> bool: + """ + Returns whether or not all parameters have been populated by the checkpoint engine, but + does not validat the parameters are on the correct device. + """ + return self._initialization_checker(check_device=False) + + @property + def is_initialized(self) -> bool: + """ + Returns whether or not all parameters have been initialized and transformed by + the model and are located on the appropriate device. Once this returns True, all + the `ParameterBase` instances ``InferenceParameter``s or explicitly set to ``None``. + """ + return self._initialization_checker() + @property def n_params(self) -> int: """ @@ -229,6 +247,10 @@ def n_params(self) -> int: """ return self._n_params + @property + def annotation_attrs(self) -> list: + return self._annotation_attrs + @property def mapping_params(self) -> dict: return getattr(self.__class__, MAPPING_KEY, {}) @@ -237,6 +259,14 @@ def mapping_params(self) -> dict: def plist_helpers(self) -> list: return getattr(self.__class__, PLIST_HELPERS, []) + def direct_injection(self, name: str, tensor: InferenceParameter) -> None: + + if name not in self._annotation_attrs: + raise ValueError(f"Cannot directly inject {name}, not a valid parameter.") + + setattr(self, name, tensor) + self._finalized_params += 1 + def set_dependency(self, dep_name: str, dep_value: torch.Tensor) -> None: """ Set dependency can be used for managing dependencies when a mapping is provided @@ -279,11 +309,6 @@ def set_dependency(self, dep_name: str, dep_value: torch.Tensor) -> None: target_dependency = getattr(target_param, target_dependency_name) target_dependency[target_idx] = dep_value return - raise ValueError( "Could not find a mapping for dependency \"{}\". Check that it is included in the ``MAPPING_PARAMS``. 
See docstring for more on ``MAPPING_PARAMS``" .format(dep_name)) - - -class ContainerMap: - pass diff --git a/deepspeed/inference/v2/model_implementations/llama_v2/__init__.py b/deepspeed/inference/v2/model_implementations/llama_v2/__init__.py index 208299fb8c50..5d2b5ae562ee 100644 --- a/deepspeed/inference/v2/model_implementations/llama_v2/__init__.py +++ b/deepspeed/inference/v2/model_implementations/llama_v2/__init__.py @@ -2,3 +2,5 @@ # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team + +from .llama_v2_policy import Llama2Policy diff --git a/deepspeed/inference/v2/model_implementations/llama_v2/llama_v2_containers.py b/deepspeed/inference/v2/model_implementations/llama_v2/llama_v2_containers.py index ec39866d0d8d..e9c473ce512b 100644 --- a/deepspeed/inference/v2/model_implementations/llama_v2/llama_v2_containers.py +++ b/deepspeed/inference/v2/model_implementations/llama_v2/llama_v2_containers.py @@ -49,7 +49,6 @@ class Llama2TransformerContainer(LayerContainer): mlp_2_w: MLP2Parameter attn_norm_gamma: NormParameter mlp_norm_gamma: NormParameter - #rotary_emb: InvFreqParameter PARAM_MAPPING = { "self_attn.q_proj.weight": "qkv_w.q_params", @@ -61,7 +60,6 @@ class Llama2TransformerContainer(LayerContainer): "mlp.down_proj.weight": "mlp_2_w.params", "input_layernorm.weight": "attn_norm_gamma.params", "post_attention_layernorm.weight": "mlp_norm_gamma.params", - #"self_attn.rotary_emb.inv_freq": "rotary_emb.params", } diff --git a/deepspeed/inference/v2/model_implementations/llama_v2/llama_v2_policy.py b/deepspeed/inference/v2/model_implementations/llama_v2/llama_v2_policy.py index 65fe7b705e53..c8253be79fad 100644 --- a/deepspeed/inference/v2/model_implementations/llama_v2/llama_v2_policy.py +++ b/deepspeed/inference/v2/model_implementations/llama_v2/llama_v2_policy.py @@ -3,11 +3,8 @@ # DeepSpeed Team -import argparse - from typing import Any -from ...checkpoint import CheckpointEngineBase from ...config_v2 import RaggedInferenceEngineConfig from ...model_implementations.inference_policy_base import ContainerMap, InferenceV2Policy from ...model_implementations.llama_v2.llama_v2_containers import Llama2NonTransformerContainer, Llama2TransformerContainer @@ -16,9 +13,6 @@ class Llama2Policy(InferenceV2Policy): - def __init__(self, checkpoint_engine: CheckpointEngineBase, model_config: argparse.Namespace) -> None: - super().__init__(checkpoint_engine, model_config) - def instantiate_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> Llama2InferenceModel: return Llama2InferenceModel(config=self._model_config, engine_config=engine_config, base_mp_group=mp_group) diff --git a/deepspeed/inference/v2/model_implementations/mistral/__init__.py b/deepspeed/inference/v2/model_implementations/mistral/__init__.py index 208299fb8c50..60d636693ef3 100644 --- a/deepspeed/inference/v2/model_implementations/mistral/__init__.py +++ b/deepspeed/inference/v2/model_implementations/mistral/__init__.py @@ -2,3 +2,5 @@ # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team + +from .policy import MistralPolicy diff --git a/deepspeed/inference/v2/model_implementations/mistral/policy.py b/deepspeed/inference/v2/model_implementations/mistral/policy.py index 93458827aa96..f6d0a0fe5987 100644 --- a/deepspeed/inference/v2/model_implementations/mistral/policy.py +++ b/deepspeed/inference/v2/model_implementations/mistral/policy.py @@ -3,11 +3,8 @@ # DeepSpeed Team -import argparse - from typing import Any -from deepspeed.inference.v2.checkpoint import CheckpointEngineBase from 
deepspeed.inference.v2.config_v2 import RaggedInferenceEngineConfig from deepspeed.inference.v2.model_implementations.inference_policy_base import ContainerMap, InferenceV2Policy from deepspeed.inference.v2.model_implementations.mistral.container import MistralNonTransformerContainer, MistralTransformerContainer @@ -16,9 +13,6 @@ class MistralPolicy(InferenceV2Policy): - def __init__(self, checkpoint_engine: CheckpointEngineBase, model_config: argparse.Namespace) -> None: - super().__init__(checkpoint_engine, model_config) - def instantiate_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> MistralInferenceModel: return MistralInferenceModel(config=self._model_config, engine_config=engine_config, base_mp_group=mp_group) diff --git a/deepspeed/inference/v2/model_implementations/opt/__init__.py b/deepspeed/inference/v2/model_implementations/opt/__init__.py index 208299fb8c50..c0f24d5243b8 100644 --- a/deepspeed/inference/v2/model_implementations/opt/__init__.py +++ b/deepspeed/inference/v2/model_implementations/opt/__init__.py @@ -2,3 +2,5 @@ # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team + +from .policy import OPTPolicy diff --git a/deepspeed/inference/v2/model_implementations/opt/policy.py b/deepspeed/inference/v2/model_implementations/opt/policy.py index 0f5002cdaa54..002fab93b462 100644 --- a/deepspeed/inference/v2/model_implementations/opt/policy.py +++ b/deepspeed/inference/v2/model_implementations/opt/policy.py @@ -3,11 +3,8 @@ # DeepSpeed Team -import argparse - from typing import Any -from ...checkpoint import CheckpointEngineBase from ...config_v2 import RaggedInferenceEngineConfig from ...model_implementations.inference_policy_base import ContainerMap, InferenceV2Policy from ...model_implementations.opt.container import OPTNonTransformerContainer, OPTTransformerContainer @@ -16,9 +13,6 @@ class OPTPolicy(InferenceV2Policy): - def __init__(self, checkpoint_engine: CheckpointEngineBase, model_config: argparse.Namespace) -> None: - super().__init__(checkpoint_engine, model_config) - def instantiate_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> OPTInferenceModel: return OPTInferenceModel(config=self._model_config, engine_config=engine_config, base_mp_group=mp_group) diff --git a/deepspeed/inference/v2/model_implementations/parameter_base.py b/deepspeed/inference/v2/model_implementations/parameter_base.py index a413c6c4027a..2dcb63c050a0 100644 --- a/deepspeed/inference/v2/model_implementations/parameter_base.py +++ b/deepspeed/inference/v2/model_implementations/parameter_base.py @@ -12,10 +12,8 @@ # Currently have dependency loops for the type hints. 
InferenceModel = Type["InferenceModel"] LayerContainer = Type["LayerContainer"] -ParametrizedList = Type["ParametrizedList"] MAPPING_KEY = "PARAM_MAPPING" -PLIST_HELPERS = "_ds_plist_strip_vals" def make_param_getter(clsname, param): diff --git a/deepspeed/inference/v2/modules/implementations/linear/__init__.py b/deepspeed/inference/v2/modules/implementations/linear/__init__.py index 5acdc69dba7c..e76aab71c4cf 100644 --- a/deepspeed/inference/v2/modules/implementations/linear/__init__.py +++ b/deepspeed/inference/v2/modules/implementations/linear/__init__.py @@ -4,4 +4,3 @@ # DeepSpeed Team from .blas_fp_linear import BlasFPLinear -from .cutlass_fp_linear import DSCutlassFPLinear diff --git a/deepspeed/inference/v2/modules/implementations/linear/blas_fp_linear.py b/deepspeed/inference/v2/modules/implementations/linear/blas_fp_linear.py index cbb8801e7193..c58dab0b826b 100644 --- a/deepspeed/inference/v2/modules/implementations/linear/blas_fp_linear.py +++ b/deepspeed/inference/v2/modules/implementations/linear/blas_fp_linear.py @@ -18,6 +18,7 @@ from ...interfaces import DSLinearBase, DSLinearRegistry from ...configs import DSLinearConfig +from ....inference_parameter import InferenceParameter @DSLinearRegistry.register_module @@ -70,14 +71,15 @@ def __init__(self, config: DSLinearConfig, implementation_config: Dict[str, Any] dtype=config.output_dtype, device=get_accelerator().current_device()) - def transform_param(self, param: torch.Tensor) -> torch.Tensor: + def transform_param(self, param: torch.Tensor) -> InferenceParameter: """ Converts param to same data type as input and output. Parameters: param (torch.Tensor): Weight or bias tensor. """ - return param.to(self._config.input_dtype) + param = param.to(self._config.output_dtype) + return InferenceParameter.initialize(param) def forward(self, hidden_states: torch.Tensor, w: torch.Tensor, b: Optional[torch.Tensor] = None) -> torch.Tensor: diff --git a/deepspeed/inference/v2/modules/implementations/linear/cutlass_fp_linear.py b/deepspeed/inference/v2/modules/implementations/linear/cutlass_fp_linear.py deleted file mode 100644 index a3704eaa82a2..000000000000 --- a/deepspeed/inference/v2/modules/implementations/linear/cutlass_fp_linear.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -from typing import Any, Dict, Optional - -import torch - -from deepspeed.accelerator import get_accelerator -from ....allocator import empty_from -from ....inference_utils import ActivationType -from ....kernels.core_ops import CUDAGatedActivation - -from ...interfaces import DSLinearBase, DSLinearRegistry -from ...configs import DSLinearConfig - - -@DSLinearRegistry.register_module -class DSCutlassFPLinear(DSLinearBase): - """ - Linear DSModule based on CUTLASS floating point kernel implementation. 
- """ - - @staticmethod - def name(): - return 'cutlass_fp_linear' - - @staticmethod - def supports_config(config: DSLinearConfig) -> bool: - if config.input_dtype != config.output_dtype: - return False - - if config.input_dtype != torch.float16 and config.input_dtype != torch.bfloat16: - return False - - return True - - def __init__(self, config: DSLinearConfig, implementation_config: Dict[str, Any]) -> None: - super().__init__(config, implementation_config) - - # TODO: Load kernel - - if config.activation == ActivationType.GEGLU: - self._geglu = CUDAGatedActivation(config.out_channels, config.output_dtype, ActivationType.GEGLU) - self._activation_int = torch.empty((config.max_tokens, config.out_channels * 2), - dtype=config.output_dtype, - device=get_accelerator().current_device()) - - self._output = torch.empty((config.max_tokens, config.out_channels), - dtype=config.output_dtype, - device=get_accelerator().current_device()) - - def transform_param(self, param: torch.Tensor) -> torch.Tensor: - """ - Converts param to same data type as input and output. - - Parameters: - param (torch.Tensor): Weight or bias tensor. - """ - return param.to(self._config.input_dtype) - - def forward(self, hidden_states: torch.Tensor, w: torch.Tensor, b: Optional[torch.Tensor] = None) -> torch.Tensor: - - output = empty_from(self._output, (hidden_states.shape[0], self._config.out_channels)) - - if self._config.activation == ActivationType.GEGLU: - intermediate = empty_from(self._activation_int, (hidden_states.shape[0], self._config.out_channels * 2)) - self._linear_impl(intermediate, hidden_states, w, b) - self._geglu(output, intermediate) - else: - self._linear_impl(output, hidden_states, w, b) - - return output - - @property - def output(self) -> torch.Tensor: - """ - Return the padded, pre-allocated output Tensor. - """ - return self._output diff --git a/deepspeed/inference/v2/modules/implementations/moe/cutlass_multi_gemm.py b/deepspeed/inference/v2/modules/implementations/moe/cutlass_multi_gemm.py index fb2388c450f0..e43a737515ed 100644 --- a/deepspeed/inference/v2/modules/implementations/moe/cutlass_multi_gemm.py +++ b/deepspeed/inference/v2/modules/implementations/moe/cutlass_multi_gemm.py @@ -21,6 +21,7 @@ from ...interfaces import DSMoEBase, DSMoERegistry from ...configs import DSMoEConfig from ....kernels.cutlass_ops import MoEGEMM +from ....inference_parameter import InferenceParameter @DSMoERegistry.register_module @@ -111,13 +112,14 @@ def _create_buffers(self): dtype=self._config.output_dtype, device=get_accelerator().current_device()) - def transform_gate_param(self, param: torch.Tensor) -> torch.Tensor: + def transform_gate_param(self, param: torch.Tensor) -> InferenceParameter: """ Ensures gate param is going to match the activation data type. """ - return param.to(self._config.input_dtype) + param = param.to(self._config.input_dtype) + return InferenceParameter.initialize(param) - def transform_moe_mlp_1_param(self, param: torch.Tensor) -> torch.Tensor: + def transform_moe_mlp_1_param(self, param: torch.Tensor) -> InferenceParameter: """ Converts param to same data type as input and output. 
@@ -127,11 +129,10 @@ def transform_moe_mlp_1_param(self, param: torch.Tensor) -> torch.Tensor: param = param.to(self._config.input_dtype) if len(param.shape) == 3: - return param.permute(0, 2, 1).contiguous() - else: - return param + param = param.permute(0, 2, 1).contiguous() + return InferenceParameter.initialize(param) - def transform_moe_mlp_2_param(self, param: torch.Tensor) -> torch.Tensor: + def transform_moe_mlp_2_param(self, param: torch.Tensor) -> InferenceParameter: """ Converts param to same data type as input and output. @@ -141,9 +142,8 @@ def transform_moe_mlp_2_param(self, param: torch.Tensor) -> torch.Tensor: param = param.to(self._config.input_dtype) if len(param.shape) == 3: - return param.permute(0, 2, 1).contiguous() - else: - return param + param = param.permute(0, 2, 1).contiguous() + return InferenceParameter.initialize(param) @property def output(self) -> torch.Tensor: diff --git a/deepspeed/inference/v2/modules/implementations/moe/gate_fn.py b/deepspeed/inference/v2/modules/implementations/moe/gate_fn.py deleted file mode 100644 index 9eceaab156e4..000000000000 --- a/deepspeed/inference/v2/modules/implementations/moe/gate_fn.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import torch -import torch.nn.functional as F -from torch import Tensor - -from typing import Tuple - -#TODO(cmikeh2): DELETE - - -@torch.jit.script -def _capacity(gates: Tensor, capacity_factor: Tensor, min_capacity: Tensor) -> Tensor: - # gates has shape of SE - num_tokens = gates.shape[0] - num_experts = gates.shape[-1] - # to(torch.int64) works around a bug in torch.onnx.export: - # it should cast k to int64 when converting torch.topk but it doesn't. - capacity = torch.ceil((num_tokens / num_experts) * capacity_factor).to(torch.int64) - if capacity < min_capacity: - capacity = min_capacity.to(torch.int64) - return capacity - - -@torch.jit.script -def _top_idx(source, k): - return torch.topk(source, k=k, dim=0)[1] - - -def top1gating(logits: Tensor, - capacity_factor: float, - min_capacity: int, - drop_tokens: bool = False) -> Tuple[Tensor, Tensor, Tensor, Tensor]: - # everything is in fp32 in this function - gates = F.softmax(logits, dim=1) - - capacity = _capacity(gates, torch.tensor(capacity_factor), torch.tensor(min_capacity)) - - # Create a mask for 1st's expert per token - indices1_s = torch.argmax(gates, dim=1) - num_experts = int(gates.shape[1]) - mask1 = F.one_hot(indices1_s, num_classes=num_experts) - - # gating decisions - exp_counts = torch.sum(mask1, dim=0).detach().to('cpu') - - assert logits.shape[ - 0] >= min_capacity, "No. of tokens (batch-size) should be greater than min_capacity. Either set min_capacity to 0 or increase your batch size." - - top_idx = _top_idx(mask1, capacity) - - mask1 = mask1 * torch.zeros_like(mask1).scatter_(0, top_idx, 1) - - indices_mask = mask1.sum(dim=1) * num_experts - 1 - indices1_s = torch.min(indices1_s, indices_mask) - - gates1_s = (gates * mask1).sum(dim=1) - - return indices1_s, gates1_s diff --git a/deepspeed/inference/v2/modules/implementations/moe/test.py b/deepspeed/inference/v2/modules/implementations/moe/test.py deleted file mode 100644 index b714366d32ec..000000000000 --- a/deepspeed/inference/v2/modules/implementations/moe/test.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (c) Microsoft Corporation. 
-# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import torch -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.modules.interfaces import DSMoERegistry -from deepspeed.inference.modules.configs import DSMoEConfig -from deepspeed.inference.modules.module_registry import ConfigBundle - - -def run_multi_gemm_with_gating(inputs, gate_weight, moe_weight1, moe_bias1, moe_weight2): - config = DSMoEConfig(model_dim=4096, intermediate_features=4096, n_experts=64, max_tokens=128) - moe = DSMoERegistry.instantiate_config( - ConfigBundle(name='cutlass_multi_gemm_moe', - config=config, - implementation_config={ - "weight_dtype": torch.bfloat16, - "transpose_weight": True, - "min_capacity": 8, - "capacity_factor": 1.0 - })) - out = moe(inputs, gate_weight, moe_weight1, moe_weight2, moe_bias1) - return out - - -a = torch.randn( - 128, - 4096, -).bfloat16().to(get_accelerator().current_device()) -weight1 = torch.randn(64, 4096, 4096).bfloat16().to(get_accelerator().current_device()) -bias1 = torch.randn(64, 4096).bfloat16().to(get_accelerator().current_device()) -weight2 = torch.randn(64, 4096, 4096).bfloat16().to(get_accelerator().current_device()) -gate_weight = torch.randn(64, 4096).bfloat16().to(get_accelerator().current_device()) - -out = run_multi_gemm_with_gating(a, gate_weight, weight1, bias1, weight2) -print(out) diff --git a/deepspeed/inference/v2/modules/implementations/post_norm/cuda_post_ln.py b/deepspeed/inference/v2/modules/implementations/post_norm/cuda_post_ln.py index b30c5b937ed2..9b2af4bb9023 100644 --- a/deepspeed/inference/v2/modules/implementations/post_norm/cuda_post_ln.py +++ b/deepspeed/inference/v2/modules/implementations/post_norm/cuda_post_ln.py @@ -12,6 +12,7 @@ from ...configs import DSNormConfig from ....kernels.core_ops.cuda_layer_norm.cuda_post_ln import CUDAFPPostLN from ....allocator import empty_from +from ....inference_parameter import InferenceParameter @DSPostNormRegistry.register_module @@ -40,8 +41,9 @@ def __init__(self, config: DSNormConfig, implementation_config: Dict[str, Any]): dtype=config.output_dtype, device=get_accelerator().current_device()) - def transform_param(self, param: torch.Tensor) -> torch.Tensor: - return param.to(self._config.input_dtype) + def transform_param(self, param: torch.Tensor) -> InferenceParameter: + param = param.to(self._config.input_dtype) + return InferenceParameter.initialize(param) def forward(self, residual: torch.Tensor, hidden_in: torch.Tensor, gamma: torch.Tensor, beta: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: diff --git a/deepspeed/inference/v2/modules/implementations/pre_norm/cuda_pre_ln.py b/deepspeed/inference/v2/modules/implementations/pre_norm/cuda_pre_ln.py index f50f4a3d2db6..90783ce8c9a6 100644 --- a/deepspeed/inference/v2/modules/implementations/pre_norm/cuda_pre_ln.py +++ b/deepspeed/inference/v2/modules/implementations/pre_norm/cuda_pre_ln.py @@ -13,6 +13,7 @@ from ....kernels.core_ops.cuda_layer_norm.cuda_pre_ln import CUDAFPPreLN from ....kernels.core_ops.cuda_layer_norm.cuda_ln import CUDAFPLN from ....allocator import empty_from +from ....inference_parameter import InferenceParameter @DSPreNormRegistry.register_module @@ -47,8 +48,9 @@ def __init__(self, config: DSNormConfig, implementation_config: Dict[str, Any]): dtype=config.output_dtype, device=get_accelerator().current_device()) - def transform_param(self, param: torch.Tensor) -> torch.Tensor: - return param.to(self._config.input_dtype) + def transform_param(self, param: torch.Tensor) -> 
InferenceParameter: + param = param.to(self._config.input_dtype) + return InferenceParameter.initialize(param) def forward(self, residual: torch.Tensor, hidden_in: Optional[torch.Tensor], gamma: torch.Tensor, beta: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: diff --git a/deepspeed/inference/v2/modules/implementations/pre_norm/cuda_pre_rms.py b/deepspeed/inference/v2/modules/implementations/pre_norm/cuda_pre_rms.py index 7aeea4b2d386..986262b31b1f 100644 --- a/deepspeed/inference/v2/modules/implementations/pre_norm/cuda_pre_rms.py +++ b/deepspeed/inference/v2/modules/implementations/pre_norm/cuda_pre_rms.py @@ -12,6 +12,7 @@ from ...configs import DSNormConfig, NormTypeEnum from ....kernels.core_ops import CUDARMSNorm, CUDARMSPreNorm from ....allocator import empty_from +from ....inference_parameter import InferenceParameter @DSPreNormRegistry.register_module @@ -50,8 +51,9 @@ def __init__(self, config: DSNormConfig, implementation_config: Dict[str, Any]): dtype=config.output_dtype, device=get_accelerator().current_device()) - def transform_param(self, param: torch.Tensor) -> torch.Tensor: - return param.to(self._config.input_dtype) + def transform_param(self, param: torch.Tensor) -> InferenceParameter: + param = param.to(self._config.input_dtype) + return InferenceParameter.initialize(param) def forward(self, residual: torch.Tensor, diff --git a/deepspeed/inference/v2/modules/interfaces/embedding_base.py b/deepspeed/inference/v2/modules/interfaces/embedding_base.py index 8078013e36b6..1ab7e5f0b7a2 100644 --- a/deepspeed/inference/v2/modules/interfaces/embedding_base.py +++ b/deepspeed/inference/v2/modules/interfaces/embedding_base.py @@ -13,6 +13,7 @@ from ..ds_module import DSModuleBase from ..module_registry import DSModuleRegistryBase from ..configs import DSEmbeddingsConfig +from ...inference_parameter import InferenceParameter class DSEmbeddingBase(DSModuleBase): @@ -32,7 +33,7 @@ def config_class() -> Type[DeepSpeedConfigModel]: def __init__(self, config: DSEmbeddingsConfig, implementation_config: Dict[str, Any]) -> None: super().__init__(config, implementation_config) - def transform_param(self, embed_param: torch.Tensor) -> torch.Tensor: + def transform_param(self, embed_param: torch.Tensor) -> InferenceParameter: """ Perform any necessary transformations on an embedding parameter. This module assumes that all embedding parameters would require the same set of transformations. @@ -59,7 +60,7 @@ def forward(self, word_embeddings: torch.Tensor, position_embeddings: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, - token_type_embeddings: Optional[torch.Tensor] = None) -> torch.Tensor: + token_type_embeddings: Optional[torch.Tensor] = None) -> InferenceParameter: """ Parameters: ragged_batch (torch.Tensor): Ragged batch of token ids + associated metadata. 
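For how these InferenceParameter-returning hooks are meant to interact with the LayerContainer additions earlier in this patch (is_populated, is_initialized, direct_injection), here is a rough usage sketch; the container instance and the fused_qkv tensor are placeholders rather than objects defined by the patch:

from deepspeed.inference.v2.inference_parameter import InferenceParameter


def populate_layer_sketch(container, fused_qkv) -> None:
    # `container` is some LayerContainer subclass instance and `fused_qkv` a plain,
    # already-transformed torch.Tensor; both are hypothetical inputs for illustration.

    # Inject a finished parameter directly, bypassing the PARAM_MAPPING path. The name
    # must be one of the container's annotated parameters (e.g. "qkv_w" in the Llama-2
    # container above).
    container.direct_injection("qkv_w", InferenceParameter.initialize(fused_qkv))

    # True once every annotated parameter has been populated, regardless of device.
    if not container.is_populated:
        raise RuntimeError("checkpoint engine has not populated every parameter")

    # Stricter check: every parameter is an InferenceParameter on the current accelerator.
    assert container.is_initialized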
diff --git a/deepspeed/inference/v2/modules/interfaces/linear_base.py b/deepspeed/inference/v2/modules/interfaces/linear_base.py index bcaad6fe269a..fe6ccbcd9344 100644 --- a/deepspeed/inference/v2/modules/interfaces/linear_base.py +++ b/deepspeed/inference/v2/modules/interfaces/linear_base.py @@ -12,6 +12,7 @@ from ..ds_module import DSModuleBase from ..module_registry import DSModuleRegistryBase from ..configs import DSLinearConfig +from ...inference_parameter import InferenceParameter class DSLinearBase(DSModuleBase): @@ -33,7 +34,7 @@ def __init__(self, config: DSLinearConfig, implementation_config: Dict[str, Any] super().__init__(config, implementation_config) @abstractmethod - def transform_param(self, param: torch.Tensor) -> torch.Tensor: + def transform_param(self, param: torch.Tensor) -> InferenceParameter: """ Perform any necessary transformations of the parameters of this module. diff --git a/deepspeed/inference/v2/modules/interfaces/moe_base.py b/deepspeed/inference/v2/modules/interfaces/moe_base.py index cc80ca55f60a..78bdc0700f63 100644 --- a/deepspeed/inference/v2/modules/interfaces/moe_base.py +++ b/deepspeed/inference/v2/modules/interfaces/moe_base.py @@ -12,6 +12,7 @@ from ..ds_module import DSModuleBase from ..module_registry import DSModuleRegistryBase from ..configs import DSMoEConfig +from ...inference_parameter import InferenceParameter class DSMoEBase(DSModuleBase): @@ -31,7 +32,7 @@ def __init__(self, config: DSMoEConfig, implementation_config: Dict[str, Any]) - super().__init__(config, implementation_config) @abstractmethod - def transform_gate_param(self, param: torch.Tensor) -> torch.Tensor: + def transform_gate_param(self, param: torch.Tensor) -> InferenceParameter: """ Perform any necessary transformations of the gate parameter. @@ -41,7 +42,7 @@ def transform_gate_param(self, param: torch.Tensor) -> torch.Tensor: ... @abstractmethod - def transform_moe_mlp_1_param(self, param: torch.Tensor) -> torch.Tensor: + def transform_moe_mlp_1_param(self, param: torch.Tensor) -> InferenceParameter: """ Perform any necessary transformations of the parameter. The specific component being transformed should be inferred from the shape of the parameter. @@ -52,7 +53,7 @@ def transform_moe_mlp_1_param(self, param: torch.Tensor) -> torch.Tensor: ... @abstractmethod - def transform_moe_mlp_2_param(self, param: torch.Tensor) -> torch.Tensor: + def transform_moe_mlp_2_param(self, param: torch.Tensor) -> InferenceParameter: """ Perform any necessary transformations of the parameter. The specified component being transformed should be inferred from the shape of the parameter. This interface is diff --git a/deepspeed/inference/v2/modules/interfaces/post_norm_base.py b/deepspeed/inference/v2/modules/interfaces/post_norm_base.py index c2a6bf69de8a..cc80e5c94bf7 100644 --- a/deepspeed/inference/v2/modules/interfaces/post_norm_base.py +++ b/deepspeed/inference/v2/modules/interfaces/post_norm_base.py @@ -12,6 +12,7 @@ from ..ds_module import DSModuleBase from ..configs.norm_config import DSNormConfig from ..module_registry import DSModuleRegistryBase +from ...inference_parameter import InferenceParameter class DSPostNormBase(DSModuleBase): @@ -33,7 +34,7 @@ def __init__(self, config: DSNormConfig, implementation_config: Dict[str, Any]) super().__init__(config, implementation_config) @abstractmethod - def transform_param(self, param: torch.Tensor) -> torch.Tensor: + def transform_param(self, param: torch.Tensor) -> InferenceParameter: """ Transform a gamma/beta parameter. 
It is assumed that both transformations are the same. diff --git a/deepspeed/inference/v2/modules/interfaces/pre_norm_base.py b/deepspeed/inference/v2/modules/interfaces/pre_norm_base.py index 7d8b4ebf1587..84f51cff6947 100644 --- a/deepspeed/inference/v2/modules/interfaces/pre_norm_base.py +++ b/deepspeed/inference/v2/modules/interfaces/pre_norm_base.py @@ -12,6 +12,7 @@ from ..ds_module import DSModuleBase from ..configs.norm_config import DSNormConfig from ..module_registry import DSModuleRegistryBase +from ...inference_parameter import InferenceParameter class DSPreNormBase(DSModuleBase): @@ -38,7 +39,7 @@ def __init__(self, config: DSNormConfig, implementation_config: Dict[str, Any]): super().__init__(config, implementation_config) @abstractmethod - def transform_param(self, param: torch.Tensor) -> torch.Tensor: + def transform_param(self, param: torch.Tensor) -> InferenceParameter: """ Transform a gamma/beta parameter. It is assumed that both transformations are the same. diff --git a/deepspeed/inference/v2/ragged/csrc/ragged_ops.cpp b/deepspeed/inference/v2/ragged/csrc/ragged_ops.cpp index 0c5e8812c84c..8a29dd2d5945 100644 --- a/deepspeed/inference/v2/ragged/csrc/ragged_ops.cpp +++ b/deepspeed/inference/v2/ragged/csrc/ragged_ops.cpp @@ -37,9 +37,40 @@ torch::Tensor allocate_fast_host_buffer(torch::Tensor device_mirror) return buffer; } +torch::Tensor allocate_view_on(torch::Tensor& tensor, torch::Tensor& buffer, int64_t offset) +{ + int8_t* data = reinterpret_cast<int8_t*>(buffer.data_ptr()); + + auto options = tensor.options().device(buffer.device()); + + return at::from_blob(data + offset, tensor.sizes(), tensor.strides(), options); +} + +torch::Tensor allocate_view_like(py::tuple shape, + py::tuple strides, + torch::Tensor& dummy_tensor, + torch::Tensor& buffer, + int64_t offset) +{ + int8_t* data = reinterpret_cast<int8_t*>(buffer.data_ptr()); + + auto options = torch::TensorOptions().device(buffer.device()).dtype(dummy_tensor.dtype()); + + return at::from_blob(data + offset, + shape.cast<std::vector<int64_t>>(), + strides.cast<std::vector<int64_t>>(), + options); +} + PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { m.def("allocate_fast_host_buffer", &allocate_fast_host_buffer, "Allocate a host mirror of an accelerator Tensor."); + m.def("allocate_view_on", + &allocate_view_on, + "Allocate a view on a Tensor on the same device as the input Tensor."); + m.def("allocate_view_like", + &allocate_view_like, + "Allocate a view on a Tensor on the same device as the input Tensor."); } diff --git a/tests/unit/inference/kernels/core_ops/__init__.py b/tests/unit/inference/kernels/core_ops/__init__.py deleted file mode 100644 index 208299fb8c50..000000000000 --- a/tests/unit/inference/kernels/core_ops/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team diff --git a/tests/unit/inference/kernels/core_ops/test_bias_activation.py b/tests/unit/inference/kernels/core_ops/test_bias_activation.py deleted file mode 100644 index 2c6134991597..000000000000 --- a/tests/unit/inference/kernels/core_ops/test_bias_activation.py +++ /dev/null @@ -1,101 +0,0 @@ -# Copyright (c) Microsoft Corporation.
-# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -from typing import Optional - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.inference_utils import ActivationType, DtypeEnum -from deepspeed.inference.v2.kernels.core_ops import CUDABiasActivation -from ...inference_test_utils import get_dtypes, allclose - - -def reference_bias_act_implementation(input: torch.Tensor, bias: Optional[torch.Tensor], - act_type: ActivationType) -> torch.Tensor: - bias_func_map = { - ActivationType.RELU: torch.nn.functional.relu, - ActivationType.GELU: torch.nn.functional.gelu, - ActivationType.SILU: torch.nn.functional.silu, - ActivationType.IDENTITY: lambda x: x, - } - - dtype = input.dtype - input_f = input.to(torch.float32) - if bias is not None: - bias_f = bias.to(torch.float32) - output_f = input_f + bias_f - else: - output_f = input_f - output_f = bias_func_map[act_type](output_f) - - return output_f.to(dtype) - - -def _bias_activation_test_helper(tokens: int, - channels: int, - act_fn: ActivationType, - dtype: DtypeEnum, - use_bias: bool = True) -> None: - """ - Fully parameterized testing entry point. - """ - # Input vals - input_tensor = torch.randn((tokens, channels), dtype=dtype.value, device=get_accelerator().current_device_name()) - if use_bias: - bias = torch.randn((channels), dtype=dtype.value, device=get_accelerator().current_device_name()) - else: - bias = None - - # Reference output - ref_output = reference_bias_act_implementation(input_tensor, bias, act_fn) - - bias_act = CUDABiasActivation(channels, dtype, act_fn) - - # New output - ds_tensor = input_tensor.clone() - bias_act(ds_tensor, bias) - - # Check - assert allclose(ds_tensor, ref_output) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("tokens, channels", [(1, 4096), (37, 2048), (112, 14432), (1024, 6144)]) -@pytest.mark.parametrize("dtype", get_dtypes(include_float=False)) -def test_token_channels_permutations(tokens: int, channels: int, dtype: torch.dtype) -> None: - """ - Validate bias activation kernel with different token and channel permutations when using the RELU - activation function. - """ - act_fn = ActivationType.RELU - dtype = DtypeEnum(dtype) - _bias_activation_test_helper(tokens, channels, act_fn, dtype) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("act_fn", - [ActivationType.RELU, ActivationType.GELU, ActivationType.SILU, ActivationType.IDENTITY]) -def test_act_fns(act_fn: ActivationType) -> None: - """ - Validate bias activation kernel with different activation functions. - """ - tokens = 223 - channels = 4096 - dtype = DtypeEnum.fp16 - _bias_activation_test_helper(tokens, channels, act_fn, dtype) - - -@pytest.mark.inference_v2_ops -def test_no_bias() -> None: - """ - Validate bias activation kernel with no bias. - """ - tokens = 223 - channels = 4096 - dtype = DtypeEnum.fp16 - act_fn = ActivationType.IDENTITY - _bias_activation_test_helper(tokens, channels, act_fn, dtype, use_bias=False) diff --git a/tests/unit/inference/kernels/core_ops/test_blas_linear.py b/tests/unit/inference/kernels/core_ops/test_blas_linear.py deleted file mode 100644 index 0f9f99b4f879..000000000000 --- a/tests/unit/inference/kernels/core_ops/test_blas_linear.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (c) Microsoft Corporation. 
-# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -from typing import Tuple - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.kernels.core_ops import BlasLibLinear -from ...inference_test_utils import allclose - -# Note: only testing with FP16 and BF16 because we use TF32 on Ampere and we don't have a good -# set of tolerances. Since this is just on top of BLAS though, the test is more about -# making sure the stride/contiguity is correct and that's data type agnostic. - - -def reference_implementation(hidden_states, weights): - return hidden_states @ weights.t() - - -problem_shapes = [ - (1, 1, 1024, 1024), - (1, 1024, 1024, 1024), - (2, 1024, 1024, 1024), - (1, 128, 768, 3072), - (1, 128, 3072, 768), - (1, 1024, 8192, 8192), - (1, 733, 8192, 32768), - (1, 13, 32768, 8192), -] - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("fp_dtype", [torch.float16, torch.bfloat16]) -@pytest.mark.parametrize("problem_shape", problem_shapes) -def test_blas_linear(fp_dtype: torch.dtype, problem_shape: Tuple[int, int, int, int]): - batch, seq_len, in_features, out_features = problem_shape - hidden_states = torch.randn(batch, seq_len, in_features, dtype=fp_dtype, - device=get_accelerator().current_device()) * 0.1 - weights = torch.randn(out_features, in_features, dtype=fp_dtype, device=get_accelerator().current_device()) * 0.01 - ds_output = torch.empty(batch, seq_len, out_features, dtype=fp_dtype, device=get_accelerator().current_device()) - - ds_kernel = BlasLibLinear(fp_dtype) - - ds_output = ds_kernel(ds_output, hidden_states, weights) - ref_output = reference_implementation(hidden_states, weights) - - assert allclose(ds_output, ref_output) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("fp_dtype", [torch.float16, torch.bfloat16]) -@pytest.mark.parametrize("problem_shape", problem_shapes) -def test_blas_linear_t(fp_dtype: torch.dtype, problem_shape: Tuple[int, int, int, int]): - batch, seq_len, in_features, out_features = problem_shape - hidden_states = torch.randn(batch, seq_len, in_features, dtype=fp_dtype, - device=get_accelerator().current_device()) * 0.1 - weights = torch.randn(out_features, in_features, dtype=fp_dtype, device=get_accelerator().current_device()) * 0.01 - ds_output = torch.empty(batch, seq_len, out_features, dtype=fp_dtype, device=get_accelerator().current_device()) - - ds_kernel = BlasLibLinear(fp_dtype) - - # Transpose the weights then revert to the format we expect. - weights = weights.t().contiguous() - weights = weights.t() - ds_output = ds_kernel(ds_output, hidden_states, weights) - - ref_output = reference_implementation(hidden_states, weights) - - assert allclose(ds_output, ref_output) diff --git a/tests/unit/inference/kernels/core_ops/test_gated_activation.py b/tests/unit/inference/kernels/core_ops/test_gated_activation.py deleted file mode 100644 index ebfca4801eea..000000000000 --- a/tests/unit/inference/kernels/core_ops/test_gated_activation.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright (c) Microsoft Corporation. 
-# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -from typing import Iterable, Optional - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.kernels.core_ops import CUDAGatedActivation -from deepspeed.inference.v2.inference_utils import ActivationType -from ...inference_test_utils import get_dtypes, allclose - - -def reference_geglu_implementation(input: torch.Tensor, - bias: Optional[torch.Tensor] = None, - act_fn: Optional[ActivationType] = ActivationType.GEGLU) -> torch.Tensor: - act_func_map = { - ActivationType.ReGLU: torch.nn.functional.relu, - ActivationType.GEGLU: lambda x: torch.nn.functional.gelu(x, approximate="tanh"), - ActivationType.SiGLU: torch.nn.functional.silu, - } - - dtype = input.dtype - input = input.to(torch.float32) - - if bias is not None: - bias = bias.to(torch.float32) - input = input + bias - - act_act = input[..., ::2] - act_linear = input[..., 1::2] - - act_act = act_func_map[act_fn](act_act) - - return (act_act * act_linear).to(dtype) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("shape", [(1372, 16384), (2, 743, 22016)]) -@pytest.mark.parametrize("dtype", get_dtypes()) -def test_dtypes(shape: Iterable[int], dtype: torch.dtype) -> None: - input_tensor = torch.randn(shape, dtype=dtype, device=get_accelerator().current_device_name()) - - # Reference output - ref_output = reference_geglu_implementation(input_tensor, act_fn=ActivationType.GEGLU) - - # Build kernel - geglu = CUDAGatedActivation(input_tensor.size(-1), input_tensor.dtype, ActivationType.GEGLU) - - # New output - output_shape = list(input_tensor.shape) - output_shape[-1] //= 2 - output_tensor = torch.empty(output_shape, dtype=input_tensor.dtype, device=get_accelerator().current_device_name()) - geglu(output_tensor, input_tensor) - - # Check - assert allclose(output_tensor, ref_output) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("act_fn", [ActivationType.GEGLU, ActivationType.ReGLU, ActivationType.SiGLU]) -def test_act_fn(act_fn: ActivationType) -> None: - input_tensor = torch.randn(832, 4096, dtype=torch.float16, device=get_accelerator().current_device()) - - # Reference output - ref_output = reference_geglu_implementation(input_tensor, act_fn=act_fn) - - cuda_act = CUDAGatedActivation(4096, torch.float16, act_fn) - - # New output - output_tensor = torch.empty(832, 2048, dtype=torch.float16, device=get_accelerator().current_device()) - cuda_act(output_tensor, input_tensor) - - assert allclose(output_tensor, ref_output) - - -@pytest.mark.inference_v2_ops -def test_act_with_bias(): - input_tensor = torch.randn(832, 4096, dtype=torch.float16, device=get_accelerator().current_device()) - bias = torch.randn(4096, dtype=torch.float16, device=get_accelerator().current_device()) - - # Reference output - ref_output = reference_geglu_implementation(input_tensor, bias=bias, act_fn=ActivationType.GEGLU) - - cuda_act = CUDAGatedActivation(4096, torch.float16, ActivationType.GEGLU) - - # New output - output_tensor = torch.empty(832, 2048, dtype=torch.float16, device=get_accelerator().current_device()) - - cuda_act(output_tensor, input_tensor, bias) - - assert allclose(output_tensor, ref_output) - - -@pytest.mark.inference_v2_ops -def test_max_channels(): - input_tensor = torch.randn(832, 48152, dtype=torch.float16, device=get_accelerator().current_device()) - - ref_output = reference_geglu_implementation(input_tensor, act_fn=ActivationType.GEGLU) - - cuda_act = CUDAGatedActivation(48152, torch.float16, 
ActivationType.GEGLU) - - output_tensor = torch.empty(832, 24076, dtype=torch.float16, device=get_accelerator().current_device()) - cuda_act(output_tensor, input_tensor) - - assert allclose(output_tensor, ref_output) - - -@pytest.mark.inference_v2_ops -def test_bad_dtype() -> None: - with pytest.raises(ValueError): - CUDAGatedActivation(128, torch.int8, ActivationType.GEGLU) - - -@pytest.mark.inference_v2_ops -def test_bad_act_fn() -> None: - with pytest.raises(ValueError): - CUDAGatedActivation(128, torch.float16, ActivationType.RELU) - - -@pytest.mark.inference_v2_ops -def test_bad_alignment() -> None: - with pytest.raises(ValueError): - CUDAGatedActivation(127, torch.float16, ActivationType.GEGLU) - - -@pytest.mark.inference_v2_ops -def test_too_many_channels() -> None: - with pytest.raises(ValueError): - CUDAGatedActivation(49160, torch.float16, ActivationType.GEGLU) diff --git a/tests/unit/inference/kernels/core_ops/test_post_ln.py b/tests/unit/inference/kernels/core_ops/test_post_ln.py deleted file mode 100644 index 8b54e5651acb..000000000000 --- a/tests/unit/inference/kernels/core_ops/test_post_ln.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.kernels.core_ops import CUDAFPPostLN -from ...inference_test_utils import get_dtypes, allclose - - -def reference_implementation(residual: torch.Tensor, hidden_states: torch.Tensor, gamma: torch.Tensor, - beta: torch.Tensor, epsilon: float) -> torch.Tensor: - residual_f = residual.to(torch.float32) - hidden_states_f = hidden_states.to(torch.float32) - gamma_f = gamma.to(torch.float32) - beta_f = beta.to(torch.float32) - return torch.nn.functional.layer_norm(residual_f + hidden_states_f, (hidden_states_f.size(-1), ), - weight=gamma_f, - bias=beta_f, - eps=epsilon).to(hidden_states.dtype) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("tokens, channels", [(1, 4096), (37, 2048), (112, 14432), (1024, 6144)]) -@pytest.mark.parametrize("dtype", get_dtypes()) -def test_cuda_post_ln(tokens: int, channels: int, dtype: torch.dtype) -> None: - - # Input vals - hidden_states = torch.randn((tokens, channels), dtype=dtype, device=get_accelerator().current_device_name()) - residual = torch.randn((tokens, channels), dtype=dtype, device=get_accelerator().current_device_name()) - gamma = torch.randn((channels), dtype=dtype, device=get_accelerator().current_device_name()) - beta = torch.rand((channels), dtype=dtype, device=get_accelerator().current_device_name()) - epsilon = 1e-5 - - # Reference output - ref_output = reference_implementation(residual, hidden_states, gamma, beta, epsilon) - - # New output - post_ln_kernel = CUDAFPPostLN(hidden_states.size(-1), residual.dtype) - ds_output = torch.empty_like(residual) - post_ln_kernel(ds_output, residual, hidden_states, gamma, beta) - - # Check - assert allclose(ds_output, ref_output) diff --git a/tests/unit/inference/kernels/core_ops/test_pre_ln.py b/tests/unit/inference/kernels/core_ops/test_pre_ln.py deleted file mode 100644 index e5ac3ae1428f..000000000000 --- a/tests/unit/inference/kernels/core_ops/test_pre_ln.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) Microsoft Corporation. 
-# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.kernels.core_ops import CUDAFPPreLN -from ...inference_test_utils import get_dtypes, allclose - - -def reference_implementation(residual: torch.Tensor, hidden_states: torch.Tensor, gamma: torch.Tensor, - beta: torch.Tensor, epsilon: float) -> torch.Tensor: - residual_f = residual.to(torch.float32) - hidden_states_f = hidden_states.to(torch.float32) - gamma_f = gamma.to(torch.float32) - beta_f = beta.to(torch.float32) - residual_out = residual_f + hidden_states_f - hidden_out = torch.nn.functional.layer_norm(residual_out, (hidden_states_f.size(-1), ), - weight=gamma_f, - bias=beta_f, - eps=epsilon) - return residual_out.to(hidden_states.dtype), hidden_out.to(hidden_states.dtype) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("tokens, channels", [(1, 4096), (37, 2048), (112, 14432), (1024, 6144)]) -@pytest.mark.parametrize("dtype", get_dtypes()) -def test_cuda_pre_ln(tokens: int, channels: int, dtype: torch.dtype) -> None: - - # Input vals - hidden_states = torch.randn((tokens, channels), dtype=dtype, device=get_accelerator().current_device_name()) - residual = torch.randn((tokens, channels), dtype=dtype, device=get_accelerator().current_device_name()) - gamma = torch.randn((channels), dtype=dtype, device=get_accelerator().current_device_name()) - beta = torch.rand((channels), dtype=dtype, device=get_accelerator().current_device_name()) - epsilon = 1e-5 - - # Reference output - ref_output_res, ref_output_hid = reference_implementation(residual, hidden_states, gamma, beta, epsilon) - - # New output - pre_ln_kernel = CUDAFPPreLN(hidden_states.size(-1), residual.dtype) - ds_output_res = torch.empty_like(residual) - ds_output_hid = torch.empty_like(hidden_states) - pre_ln_kernel(ds_output_res, ds_output_hid, residual, hidden_states, gamma, beta) - - # Check - assert allclose(ds_output_res, ref_output_res) - assert allclose(ds_output_hid, ref_output_hid) diff --git a/tests/unit/inference/kernels/core_ops/test_rms_norm.py b/tests/unit/inference/kernels/core_ops/test_rms_norm.py deleted file mode 100644 index d2893a2115b7..000000000000 --- a/tests/unit/inference/kernels/core_ops/test_rms_norm.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (c) Microsoft Corporation. 
-# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.inference_utils import DtypeEnum -from deepspeed.inference.v2.kernels.core_ops import CUDARMSNorm, CUDARMSPreNorm -from ...inference_test_utils import get_dtypes, allclose - - -def reference_rms_norm(vals: torch.Tensor, gamma: torch.Tensor, epsilon: float = 1e-5) -> torch.Tensor: - variance = vals.to(torch.float32).pow(2).mean(-1, keepdim=True) - vals = vals * torch.rsqrt(variance + epsilon) - - if gamma.dtype in [torch.float16, torch.bfloat16]: - vals = vals.to(gamma.dtype) - - return gamma * vals - - -def reference_rms_pre_norm(vals: torch.Tensor, - residual: torch.Tensor, - gamma: torch.Tensor, - epsilon: float = 1e-5) -> torch.Tensor: - residual = residual + vals - return residual, reference_rms_norm(residual, gamma, epsilon) - - -def _rms_norm_testing_helper(rows: int, channels: int, do_residual: bool, dtype: DtypeEnum) -> None: - device = get_accelerator().current_device_name() - t_dtype = dtype.value - - vals = torch.randn((rows, channels), dtype=t_dtype, device=device) - gamma = torch.randn((channels), dtype=t_dtype, device=device) - epsilon = 1e-5 - - if do_residual: - residual_in = torch.randn((rows, channels), dtype=t_dtype, device=device) - ds_residual = residual_in.clone() - - ref_residual, ref_output = reference_rms_pre_norm(vals, residual_in, gamma, epsilon) - - kernel = CUDARMSPreNorm(channels, t_dtype, epsilon=epsilon) - ds_out = torch.empty_like(ds_residual) - - kernel(ds_residual, ds_out, residual_in, vals, gamma) - - assert allclose(ds_out, ref_output) - assert allclose(ds_residual, ref_residual) - else: - - ref_output = reference_rms_norm(vals, gamma, epsilon) - - kernel = CUDARMSNorm(channels, t_dtype, epsilon=epsilon) - ds_out = torch.empty_like(vals) - - kernel(ds_out, vals, gamma) - - assert allclose(ds_out, ref_output) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("dtype", get_dtypes()) -@pytest.mark.parametrize("do_residual", [True, False]) -def test_rms_dtypes(dtype: DtypeEnum, do_residual: bool) -> None: - _rms_norm_testing_helper(883, 1024, do_residual, DtypeEnum(dtype)) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("rows, cols", [(1, 4096), (37, 2048), (112, 14432), (1024, 6144)]) -@pytest.mark.parametrize("do_residual", [True, False]) -def test_rms_shapes(rows: int, cols: int, do_residual: bool) -> None: - _rms_norm_testing_helper(rows, cols, do_residual, DtypeEnum.fp16) diff --git a/tests/unit/inference/kernels/cutlass_ops/__init__.py b/tests/unit/inference/kernels/cutlass_ops/__init__.py deleted file mode 100644 index 208299fb8c50..000000000000 --- a/tests/unit/inference/kernels/cutlass_ops/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team diff --git a/tests/unit/inference/kernels/cutlass_ops/test_moe_gemm.py b/tests/unit/inference/kernels/cutlass_ops/test_moe_gemm.py deleted file mode 100644 index 4dd6d286fe00..000000000000 --- a/tests/unit/inference/kernels/cutlass_ops/test_moe_gemm.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright (c) Microsoft Corporation. 
-# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.inference_utils import ActivationType, DtypeEnum -from deepspeed.inference.v2.kernels.cutlass_ops import MoEGEMM -from ...inference_test_utils import allclose - -SINGLE_EXPERT_CASES = [(13, 2048, 2048), (256, 1024, 4096), (278, 5120, 2048), (893, 5120, 2560)] - -PYTORCH_ACT_FN_MAP = { - ActivationType.GELU: torch.nn.functional.gelu, - ActivationType.SILU: torch.nn.functional.silu, - ActivationType.RELU: torch.nn.functional.relu -} - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("n_tokens, in_neurons, out_neurons", SINGLE_EXPERT_CASES) -def test_single_expert(n_tokens: int, in_neurons: int, out_neurons: int) -> None: - """ - Validate that the GEMM kernel produces identical results for a single GEMM instance. - """ - device = get_accelerator().current_device() - - activations = torch.rand((n_tokens, in_neurons), device=device, dtype=torch.float16) - 0.5 - weights = torch.rand((1, in_neurons, out_neurons), device=device, dtype=torch.float16) - 0.5 - biases = torch.randn((1, out_neurons), device=device, dtype=torch.float16) - - weights_ref = weights.reshape(in_neurons, out_neurons) - biases_ref = biases.reshape(out_neurons) - ref_output = torch.matmul(activations, weights_ref) + biases_ref - - moe_gemm = MoEGEMM(DtypeEnum.fp16, ActivationType.IDENTITY) - output = torch.empty((n_tokens, out_neurons), device=device, dtype=torch.float16) - cumsum_rows = torch.tensor([n_tokens], dtype=torch.int64, device=device) - - moe_gemm(output, activations, weights, cumsum_rows, biases) - assert allclose(output, ref_output, tolerances=(1e-2, 1e-2)) - get_accelerator().synchronize() - - -def moe_test_helper(in_neurons: int, out_neurons: int, n_experts: int, max_tokens_per_expert: int, - act_fn: ActivationType, dtype: DtypeEnum) -> None: - """ - Helper function for validating the GEMM kernel for a single expert. 
- """ - device = get_accelerator().current_device() - - expert_allocations = torch.randint(0, max_tokens_per_expert, (n_experts, ), device=device, dtype=torch.int32) - cumsum_rows = expert_allocations.cumsum(dim=0) - print(cumsum_rows.dtype) - - activations = torch.rand((cumsum_rows[-1], in_neurons), device=device, dtype=dtype.value) - 0.5 - weights = torch.rand((n_experts, in_neurons, out_neurons), device=device, dtype=dtype.value) - 0.5 - biases = torch.randn((n_experts, out_neurons), device=device, dtype=dtype.value) - - out_ref = torch.empty((cumsum_rows[-1], out_neurons), device=device, dtype=dtype.value) - - for expert_idx in range(n_experts): - start = cumsum_rows[expert_idx - 1] if expert_idx > 0 else 0 - end = cumsum_rows[expert_idx] - activations_slice = activations[start:end] - weights_slice = weights[expert_idx] - biases_slice = biases[expert_idx] - out_ref[start:end] = torch.matmul(activations_slice, weights_slice) + biases_slice - - if act_fn != ActivationType.IDENTITY: - act_fn_fn = PYTORCH_ACT_FN_MAP[act_fn] - out_ref = act_fn_fn(out_ref) - - moe_gemm = MoEGEMM(DtypeEnum.fp16, act_fn) - output = torch.empty((cumsum_rows[-1], out_neurons), device=device, dtype=dtype.value) - - moe_gemm(output, activations, weights, cumsum_rows, biases) - - if dtype == DtypeEnum.bf16: - assert allclose(output, out_ref, tolerances=(1e-1, 1e-1)) - else: - assert allclose(output, out_ref, tolerances=(1e-2, 1e-2)) - get_accelerator().synchronize() - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("max_tokens_per_expert", [1, 4, 16, 64, 128]) -def test_multi_expert(max_tokens_per_expert: int) -> None: - """ - Validate for multi-expert GEMM instances that the output is identical to the reference. - """ - moe_test_helper(5120, 2048, 64, max_tokens_per_expert, ActivationType.IDENTITY, DtypeEnum.fp16) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("act_fn", [ActivationType.GELU, ActivationType.SILU, ActivationType.RELU]) -def test_act_fns(act_fn: ActivationType) -> None: - """ - Validate activation function behavior. - """ - moe_test_helper(5120, 2048, 64, 32, act_fn, DtypeEnum.fp16) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("dtype", [DtypeEnum.fp16, DtypeEnum.bf16]) -def test_dtypes(dtype: DtypeEnum) -> None: - """ - Validate data type behavior. - """ - moe_test_helper(5120, 2048, 64, 32, ActivationType.IDENTITY, dtype) diff --git a/tests/unit/inference/kernels/ragged_ops/__init__.py b/tests/unit/inference/kernels/ragged_ops/__init__.py deleted file mode 100644 index 208299fb8c50..000000000000 --- a/tests/unit/inference/kernels/ragged_ops/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team diff --git a/tests/unit/inference/kernels/ragged_ops/ragged_testing_utils.py b/tests/unit/inference/kernels/ragged_ops/ragged_testing_utils.py deleted file mode 100644 index 445c6c38b87f..000000000000 --- a/tests/unit/inference/kernels/ragged_ops/ragged_testing_utils.py +++ /dev/null @@ -1,300 +0,0 @@ -# Copyright (c) Microsoft Corporation. 
-# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import random -from typing import List, Optional, Tuple - -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.ragged import ( - AllocationMode, - DSSequenceDescriptor, - DSStateManager, - DSStateManagerConfig, - KVCacheConfig, - MemoryConfig, - PlaceholderSequenceDescriptor, - RaggedBatchWrapper, -) -from ...inference_test_utils import allclose - - -def build_simple_batch(seq_lens: List[int], - vocab_range: Optional[int] = 100, - padding: Optional[bool] = False) -> RaggedBatchWrapper: - """ - Construct a simple batch with the given sequence lengths. This method should not - be used for for testing scenarios that require information about KV or sequence - history. - """ - total_tokens = max(sum(seq_lens), 1024) - n_seqs = max(len(seq_lens), 128) - - config = DSStateManagerConfig(max_tracked_sequences=n_seqs, - max_ragged_sequence_count=n_seqs, - max_ragged_batch_size=total_tokens) - batch = RaggedBatchWrapper(config) - - batch.clear() - - for seq_len in seq_lens: - seq_desc = PlaceholderSequenceDescriptor() - tokens = torch.randint(0, vocab_range, (seq_len, )) - batch.insert_sequence(seq_desc, tokens) - - batch.finalize(padding=padding) - - return batch - - -def build_complex_batch(seq_params: List[Tuple[int, int, int]], - kv_block_size: int, - vocab_range: Optional[int] = 100, - padding: Optional[bool] = False) -> Tuple[RaggedBatchWrapper, int]: - """ - Construct a fully paramtrized batch with the given sequence lengths. This method - can be used to construct more realistic inputs for testing scenarios that will interact - with all the members of the RaggedBatchWrapper. - """ - seq_lens = [seq_param[0] for seq_param in seq_params] - total_tokens = max(sum(seq_lens), 1024) - n_seqs = max(len(seq_lens), 128) - - config = DSStateManagerConfig(max_tracked_sequences=n_seqs, - max_ragged_sequence_count=n_seqs, - max_ragged_batch_size=total_tokens) - batch = RaggedBatchWrapper(config) - - batch.clear() - - total_kv_blocks = 0 - - for seq_len, n_seen_tokens, kv_ptr in seq_params: - n_kv_blocks = (seq_len + n_seen_tokens + kv_block_size - 1) // kv_block_size - seq_desc = PlaceholderSequenceDescriptor(seen_tokens=n_seen_tokens, - cur_allocated_blocks=n_kv_blocks, - kv_blocks_ptr=kv_ptr) - tokens = torch.randint(0, vocab_range, (seq_len, )) - batch.insert_sequence(seq_desc, tokens) - total_kv_blocks += n_kv_blocks - - batch.finalize(padding=padding) - - return batch, total_kv_blocks - - -def build_batch_and_manager( - seq_params: List[Tuple[int, int]], - head_size: int, - n_heads_kv: int, - kv_block_size: int, - vocab_range: Optional[int] = 100, - padding: Optional[bool] = False, - kv_fill: Optional[List[torch.Tensor]] = None -) -> Tuple[RaggedBatchWrapper, DSStateManager, List[DSSequenceDescriptor]]: - """ - Will construct and populate a batch and KVCache with the given sequence parameters. - - Arguments: - seq_params (List[Tuple[int, int]]): A list of tuples containing the sequence length and - the number of tokens that have already been seen for that sequence. - head_size (int): The size of each attention head. - n_heads_kv (int): The number of attention heads for the KV-cache. - kv_block_size (int): The size of each block in the KV-cache. - vocab_range (Optional[int]): The range of the vocabulary. Defaults to 100. - padding (Optional[bool]): Whether to pad the batch. Defaults to False. - kv_fill (Optional[List[torch.Tensor]]): A list of tensors to use to populate the KV-cache. 
- If this is not provided, the KV-cache will be treated as empty and the contents should - not be relied upon. NOTE(cmikeh2): This functionality relies on the functionality - of LinearBlockedKVCopy. If tests relying on this feature are failing, make sure that - LinearBlockedKVCopy is working correctly. - """ - seq_lens = [seq_param[0] for seq_param in seq_params] - fill_lens = [seq_param[1] for seq_param in seq_params] - max_created_batch_len = max(sum(seq_lens), sum(fill_lens)) - total_tokens = max(max_created_batch_len, 1024) - n_seqs = max(len(seq_lens), 128) - - req_kv_blocks = [None] * n_seqs - total_kv_blocks = 0 - for i, (seq_len, n_seen_tokens) in enumerate(seq_params): - req_kv_blocks[i] = (seq_len + n_seen_tokens + kv_block_size - 1) // kv_block_size - total_kv_blocks += req_kv_blocks[i] - - kv_config = KVCacheConfig(block_size=kv_block_size, - num_allocation_groups=1, - cache_shape=(1, n_heads_kv, head_size)) - memory_config = MemoryConfig(mode=AllocationMode.ALLOCATE, size=total_kv_blocks) - - config = DSStateManagerConfig(max_tracked_sequences=n_seqs, - max_ragged_sequence_count=n_seqs, - max_ragged_batch_size=total_tokens, - memory_config=memory_config) - - batch = RaggedBatchWrapper(config) - state_manager = DSStateManager(config, kv_config) - - # At the beginning of operation, the design of the allocator is such that it will return - # linear blocks of memory. The following will "warm up" the allocator so that we can be - # more certain that code is not dependent on this behavior. - all_allocs = [] - for _ in range(20): - decision = random.randint(0, 1) - - if decision == 0: - blocks_to_allocate = random.randint(0, total_kv_blocks) - if blocks_to_allocate <= state_manager.free_blocks and blocks_to_allocate > 0: - all_allocs.append(state_manager.allocate_blocks(blocks_to_allocate)) - else: - if len(all_allocs) > 0: - idx = random.randint(0, len(all_allocs) - 1) - state_manager._kv_cache.free(all_allocs[idx]) - - del all_allocs[idx] - - for alloc in all_allocs: - state_manager._kv_cache.free(alloc) - - assert state_manager.free_blocks == total_kv_blocks - - batch.clear() - seq_descs = [] - - if kv_fill is None or sum(fill_lens) == 0: - for i, (seq_len, n_seen_tokens) in enumerate(seq_params): - # Create empty descriptor - seq_desc = state_manager.get_or_create_sequence(i) - - # Update `seen_tokens` in the descriptor - seq_desc.pre_forward(n_seen_tokens) - seq_desc.post_forward() - - # Ensure there's enough KV-cache for the sequence - kv_block_ids = state_manager.allocate_blocks(req_kv_blocks[i]) - print(f"Allocated {req_kv_blocks[i]} blocks for sequence {i}: {kv_block_ids}") - seq_desc.extend_kv_cache(kv_block_ids) - - # Insert sequence into batch - tokens = torch.randint(0, vocab_range, (seq_len, )) - batch.insert_sequence(seq_desc, tokens) - seq_desc.pre_forward(seq_len) - seq_descs.append(seq_desc) - else: - qkv = torch.empty((total_tokens, (n_heads_kv * 3) * head_size), - dtype=torch.float16, - device=get_accelerator().current_device()) - fills_as_tensor = torch.tensor(fill_lens, dtype=torch.int32) - fill_cumsum = torch.cat((torch.tensor([0], dtype=torch.int32), torch.cumsum(fills_as_tensor, dim=0))) - - for i, (_, n_seen_tokens) in enumerate(seq_params): - # Create empty descriptor - seq_desc = state_manager.get_or_create_sequence(i) - - # Update `seen_tokens` in the descriptor - if n_seen_tokens > 0: - dummy_fill_toks = torch.randint(0, vocab_range, (n_seen_tokens, )) - batch.insert_sequence(seq_desc, dummy_fill_toks) - seq_desc.pre_forward(n_seen_tokens) - - # Ensure 
there's enough KV-cache for the sequence - kv_block_ids = state_manager.allocate_blocks(req_kv_blocks[i]) - print(f"Allocated {req_kv_blocks[i]} blocks for sequence {i}: {kv_block_ids}") - seq_desc.extend_kv_cache(kv_block_ids) - seq_descs.append(seq_desc) - - if n_seen_tokens == 0: - continue - - assert kv_fill[i].shape[0] == n_seen_tokens - assert kv_fill[i].shape[1] == n_heads_kv * head_size * 2 - - local_q = torch.randn((n_seen_tokens, n_heads_kv * head_size), dtype=torch.float16, device=qkv.device) - local_qkv = torch.cat((local_q, kv_fill[i]), dim=1) - qkv[fill_cumsum[i]:fill_cumsum[i + 1]] = local_qkv - - batch.finalize(padding=padding) - - from deepspeed.inference.v2.kernels.ragged_ops import LinearBlockedKVCopy - kv_copy = LinearBlockedKVCopy(head_size, n_heads_kv, n_heads_kv, torch.float16) - kv_cache = state_manager.get_cache(0) - kv_copy(kv_cache, qkv, batch) - - for seq_desc in seq_descs: - if seq_desc.in_flight_tokens > 0: - seq_desc.post_forward() - - batch.clear() - - for i, (seq_len, _) in enumerate(seq_params): - seq_desc = state_manager.get_or_create_sequence(i) - tokens = torch.randint(0, vocab_range, (seq_len, )) - batch.insert_sequence(seq_desc, tokens) - seq_desc.pre_forward(seq_len) - - # We will skip KV cache allocation here because we did a lump allocation above - # for both the fill and the sequence itself. - - batch.finalize(padding=padding) - - return batch, state_manager, seq_descs - - -def validate_kv_cache(kv_cache: torch.Tensor, - k: torch.Tensor, - v: torch.Tensor, - seq_descs: List[DSSequenceDescriptor], - batch: RaggedBatchWrapper, - exact: bool = True) -> None: - """ - Given a QKV tensor and a KV cache, validate that the cache contains the correct values. - """ - block_size = kv_cache.shape[1] - n_kv_heads = kv_cache.shape[3] - head_size = kv_cache.shape[4] - - inflight_descs = batch.inflight_seq_descriptors(on_device=False)[:batch.current_sequences] - - if inflight_descs.shape[0] != len(seq_descs): - raise ValueError("The number of sequence descriptors does not match the number of sequences in the batch.") - - for seq_desc, inflight_seq in zip(seq_descs, inflight_descs): - start_idx = inflight_seq[0] - assigned_kv_blocks = seq_desc.kv_cache_ids(on_device=False) - - real_k_values = k[start_idx:start_idx + seq_desc.in_flight_tokens] - real_v_values = v[start_idx:start_idx + seq_desc.in_flight_tokens] - - start_block_idx = seq_desc.seen_tokens // block_size - local_start_idx = 0 - cur_start_idx = seq_desc.seen_tokens - - for block_idx in range(start_block_idx, seq_desc.cur_allocated_blocks): - block = kv_cache[assigned_kv_blocks[0, block_idx].item()] - block_start_idx = cur_start_idx % block_size - n_tokens_to_check = min(block_size - block_start_idx, seq_desc.in_flight_tokens - local_start_idx) - block_end_idx = block_start_idx + n_tokens_to_check - - if exact: - assert torch.equal( - block[block_start_idx:block_end_idx, 0, :, :], - real_k_values[local_start_idx:local_start_idx + n_tokens_to_check].reshape( - n_tokens_to_check, n_kv_heads, head_size)) - assert torch.equal( - block[block_start_idx:block_end_idx, 1, :, :], - real_v_values[local_start_idx:local_start_idx + n_tokens_to_check].reshape( - n_tokens_to_check, n_kv_heads, head_size)) - else: - assert allclose( - block[block_start_idx:block_end_idx, 0, :, :], - real_k_values[local_start_idx:local_start_idx + n_tokens_to_check].reshape( - n_tokens_to_check, n_kv_heads, head_size)) - assert allclose( - block[block_start_idx:block_end_idx, 1, :, :], - real_v_values[local_start_idx:local_start_idx + 
n_tokens_to_check].reshape( - n_tokens_to_check, n_kv_heads, head_size)) - - local_start_idx += n_tokens_to_check - cur_start_idx += n_tokens_to_check diff --git a/tests/unit/inference/kernels/ragged_ops/test_atom_builder.py b/tests/unit/inference/kernels/ragged_ops/test_atom_builder.py deleted file mode 100644 index a33c938a0608..000000000000 --- a/tests/unit/inference/kernels/ragged_ops/test_atom_builder.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import pytest -import torch - -from deepspeed.inference.v2.kernels.ragged_ops import AtomBuilder -from .ragged_testing_utils import build_complex_batch - -Q_BLOCK_SIZE = 128 -KV_BLOCK_SIZE = 128 - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize('seq_params', [(1, 0, 0), (1, 228, 0), (383, 0, 0), (1, 494, 0)]) -def test_single_sequence(seq_params) -> None: - seq_len, n_seen_tokens, _ = seq_params - - batch, _ = build_complex_batch([seq_params], kv_block_size=KV_BLOCK_SIZE, padding=False) - atom_builder = AtomBuilder() - - atoms = torch.empty((8, 8), dtype=torch.int32, device=torch.device("cpu")) - atoms, n_atoms = atom_builder(atoms, batch, Q_BLOCK_SIZE, KV_BLOCK_SIZE) - - calc_n_atoms = (seq_len + 127) // 128 - - assert n_atoms == calc_n_atoms - - for i, atom in enumerate(atoms[:n_atoms]): - # Since the ptr was 0, first 2 elements should be 0 - assert atom[0] == 0 - assert atom[1] == 0 - - # Since we have a single sequence, the q_start_idx should always be - # whichever atom we're on multiplied by the block size - assert atom[2] == i * Q_BLOCK_SIZE - assert atom[3] == min(Q_BLOCK_SIZE, seq_len - i * Q_BLOCK_SIZE) - total_toks = i * Q_BLOCK_SIZE + min(Q_BLOCK_SIZE, seq_len - i * Q_BLOCK_SIZE) - - assert atom[4] == (total_toks + n_seen_tokens + KV_BLOCK_SIZE - 1) // KV_BLOCK_SIZE - assert atom[5] == (total_toks + n_seen_tokens) - - assert atom[6] == n_seen_tokens + i * Q_BLOCK_SIZE diff --git a/tests/unit/inference/kernels/ragged_ops/test_blocked_flash.py b/tests/unit/inference/kernels/ragged_ops/test_blocked_flash.py deleted file mode 100644 index a16a7775e964..000000000000 --- a/tests/unit/inference/kernels/ragged_ops/test_blocked_flash.py +++ /dev/null @@ -1,197 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import itertools - -from typing import List, Tuple - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.inference_utils import DtypeEnum -from deepspeed.inference.v2.kernels.ragged_ops import ( - AtomBuilder, - BlockedFlashAttn, - get_q_block_size, - get_kv_block_size, - LinearBlockedKVCopy, -) -from deepspeed.inference.v2.ragged import split_kv -from deepspeed.ops.op_builder import RaggedUtilsBuilder - -from .ragged_testing_utils import build_batch_and_manager -from ...inference_test_utils import allclose - -try: - from flash_attn.flash_attn_interface import flash_attn_varlen_func - validate_accuracy = True -except ImportError: - validate_accuracy = False -""" -NOTE(cmikeh2): These tests depend on atom construction and KV-cache copying to behave correctly. -If one or the other of those is not working, then these tests will fail. Before debugging here, -make sure that the atom construction and KV-cache copying tests are passing. 
-""" - - -def _blocked_flash_testing_helper(head_size: int, n_heads_q: int, n_heads_kv: int, - seq_params: List[Tuple[int, int]]) -> None: - """ - Helper function for testing blocked flash attention. Used to enable parametrize to only set up - a subset of parameters before being passed to the unified test function. - """ - q_block_size = get_q_block_size(head_size) - kv_block_size = get_kv_block_size(head_size) - - kvs = [] - for _, history_len in seq_params: - if history_len > 0: - kvs.append( - torch.randn((history_len, 2 * n_heads_kv * head_size), - device=get_accelerator().current_device(), - dtype=torch.float16)) - else: - kvs.append(None) - - batch, state_manager, _ = build_batch_and_manager(seq_params, head_size, n_heads_kv, kv_block_size, kv_fill=kvs) - - atom_builder = AtomBuilder() - kv_copy = LinearBlockedKVCopy(head_size, n_heads_q, n_heads_kv, DtypeEnum.fp16) - atom_flash = BlockedFlashAttn(head_size, DtypeEnum.fp16) - - total_atoms = sum((seq[0] + q_block_size - 1) // q_block_size for seq in seq_params) - atoms = torch.empty((total_atoms, 8), dtype=torch.int32, device=get_accelerator().current_device()) - alloc_func = RaggedUtilsBuilder().load().allocate_fast_host_buffer - atoms_host = alloc_func(atoms) - - qkv = torch.randn((batch.current_tokens, (n_heads_q + 2 * n_heads_kv) * head_size), - device=get_accelerator().current_device(), - dtype=torch.float16) - - atoms_host, n_atoms = atom_builder(atoms_host, batch, q_block_size, kv_block_size) - atoms.copy_(atoms_host[:n_atoms]) - - kv_cache = state_manager.get_cache(0) - kv_copy(kv_cache, qkv, batch) - - out = torch.empty((batch.current_tokens, head_size * n_heads_q), - device=get_accelerator().current_device(), - dtype=torch.float16) - k_cache, v_cache = split_kv(kv_cache) - q = qkv[:, :head_size * n_heads_q] - - atom_flash(out, q, k_cache, v_cache, atoms, 1.0) - - if validate_accuracy: - cu_seqlens_q = torch.tensor([0] + list(itertools.accumulate([seq[0] for seq in seq_params])), - dtype=torch.int32, - device=get_accelerator().current_device()) - cu_seqlens_kv = torch.tensor([0] + list(itertools.accumulate([seq[1] + seq[0] for seq in seq_params])), - dtype=torch.int32, - device=get_accelerator().current_device()) - - inflight_kv = qkv[:, head_size * n_heads_q:] - full_kvs = [] - for i, kv in enumerate(kvs): - if kv is not None: - full_kvs.append(torch.cat([kv, inflight_kv[cu_seqlens_q[i]:cu_seqlens_q[i + 1]]], dim=0)) - else: - full_kvs.append(inflight_kv[cu_seqlens_q[i]:cu_seqlens_q[i + 1]]) - run_kvs = torch.cat(full_kvs, dim=0) - k = run_kvs[:, :head_size * n_heads_kv] - v = run_kvs[:, head_size * n_heads_kv:] - - q_ref = q.reshape((batch.current_tokens, n_heads_q, head_size)) - k_ref = k.reshape((k.shape[0], n_heads_kv, head_size)) - v_ref = v.reshape((v.shape[0], n_heads_kv, head_size)) - - max_seqlen_q = max([seq[0] for seq in seq_params]) - max_seqlen_kv = max([seq[1] + seq[0] for seq in seq_params]) - - ref_o = flash_attn_varlen_func(q_ref, - k_ref, - v_ref, - cu_seqlens_q, - cu_seqlens_kv, - max_seqlen_q, - max_seqlen_kv, - softmax_scale=1.0, - causal=True) - - ref_o = ref_o.reshape(batch.current_tokens, head_size * n_heads_q) - - assert allclose(out, ref_o) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("n_tokens", [2, 33, 65, 128, 256, 2037]) -def test_single_prompt(n_tokens: int) -> None: - head_size = 64 - n_heads_q = 16 - n_heads_kv = 16 - - seq_params = [(n_tokens, 0)] - _blocked_flash_testing_helper(head_size, n_heads_q, n_heads_kv, seq_params) - - -@pytest.mark.inference_v2_ops 
-@pytest.mark.parametrize("prompt_lengths", [(128, 128), (192, 38), (514, 713), (83, 312, 610)]) -def test_multiple_prompts(prompt_lengths: Tuple[int, int]) -> None: - """ - Test multiple prompts in a single batch. - """ - head_size = 64 - n_heads_q = 16 - n_heads_kv = 16 - - seq_params = [(prompt_lengths[i], 0) for i in range(len(prompt_lengths))] - _blocked_flash_testing_helper(head_size, n_heads_q, n_heads_kv, seq_params) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("seq_params", [(1, 34), (43, 40), (1, 144), (64, 128), (332, 628)]) -def test_continuation(seq_params: Tuple[int, int]) -> None: - """ - Test continued generation/prompt processing. - """ - head_size = 64 - n_heads_q = 32 - n_heads_kv = 32 - - _blocked_flash_testing_helper(head_size, n_heads_q, n_heads_kv, [seq_params]) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("head_size", [64, 128]) -def test_head_size(head_size: int) -> None: - n_heads_q = 16 - n_heads_kv = 16 - seq_params = [(128, 128), (192, 38), (1, 814)] - - _blocked_flash_testing_helper(head_size, n_heads_q, n_heads_kv, seq_params) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("head_config", [(32, 8), (64, 16), (40, 8)]) -def test_gqa(head_config: Tuple[int, int]) -> None: - head_size = 128 - n_heads_q = head_config[0] - n_heads_kv = head_config[1] - - seq_params = [(128, 128), (192, 38), (1, 814)] - - _blocked_flash_testing_helper(head_size, n_heads_q, n_heads_kv, seq_params) - - -@pytest.mark.inference_v2_ops -def test_fully_composed() -> None: - head_size = 64 - n_heads_q = 16 - n_heads_kv = 16 - - seq_params = [(332, 628), (1, 718), (1, 323), (180, 5), (224, 0)] - - _blocked_flash_testing_helper(head_size, n_heads_q, n_heads_kv, seq_params) diff --git a/tests/unit/inference/kernels/ragged_ops/test_blocked_kv_copy.py b/tests/unit/inference/kernels/ragged_ops/test_blocked_kv_copy.py deleted file mode 100644 index 90fe26eb4490..000000000000 --- a/tests/unit/inference/kernels/ragged_ops/test_blocked_kv_copy.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright (c) Microsoft Corporation. 
-# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.kernels.ragged_ops import LinearBlockedKVCopy -from .ragged_testing_utils import build_batch_and_manager, validate_kv_cache - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("n_tokens, history_size", [(1, 0), (17, 0), (33, 8), (63, 1)]) -def test_single_sequence_single_block(n_tokens: int, history_size: int): - """ - Validate that the copy works correctly - """ - head_size = 64 - n_heads_q = 16 - n_heads_kv = 16 - kv_block_size = 64 - device = get_accelerator().current_device() - - batch, state_manager, seq_descs = build_batch_and_manager([(n_tokens, history_size)], head_size, n_heads_kv, - kv_block_size) - - assert batch.current_sequences == 1 - assert batch.current_tokens == n_tokens - - qkv = torch.randn((batch.current_tokens, (n_heads_q + 2 * n_heads_kv) * head_size), - device=device, - dtype=torch.float16) - - kv_cache = state_manager.get_cache(0) - - copy_impl = LinearBlockedKVCopy(head_size, n_heads_q, n_heads_kv, torch.float16) - copy_impl(kv_cache, qkv, batch) - - k = qkv[:, head_size * n_heads_q:head_size * (n_heads_q + n_heads_kv)] - v = qkv[:, head_size * (n_heads_q + n_heads_kv):] - - validate_kv_cache(kv_cache, k, v, seq_descs, batch) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("n_tokens, history_size", [(128, 0), (177, 0), (169, 8), (117, 88)]) -def test_single_sequence_multiple_blocks(n_tokens: int, history_size: int): - """ - Validate that the copy works correctly - """ - head_size = 64 - n_heads_q = 16 - n_heads_kv = 16 - kv_block_size = 64 - device = get_accelerator().current_device() - - batch, state_manager, seq_descs = build_batch_and_manager([(n_tokens, history_size)], head_size, n_heads_kv, - kv_block_size) - - assert batch.current_sequences == 1 - assert batch.current_tokens == n_tokens - - qkv = torch.randn((batch.current_tokens, (n_heads_q + 2 * n_heads_kv) * head_size), - device=device, - dtype=torch.float16) - - kv_cache = state_manager.get_cache(0) - - copy_impl = LinearBlockedKVCopy(head_size, n_heads_q, n_heads_kv, torch.float16) - copy_impl(kv_cache, qkv, batch) - - k = qkv[:, head_size * n_heads_q:head_size * (n_heads_q + n_heads_kv)] - v = qkv[:, head_size * (n_heads_q + n_heads_kv):] - - validate_kv_cache(kv_cache, k, v, seq_descs, batch) - - -@pytest.mark.inference_v2_ops -def test_multi_sequence() -> None: - head_size = 64 - n_heads_q = 16 - n_heads_kv = 16 - kv_block_size = 64 - device = get_accelerator().current_device() - - batch_config = [ - (128, 0), - (177, 0), - (169, 8), - (117, 88), - (1, 293), - (1, 733), - (1, 33), - ] - - batch, state_manager, seq_descs = build_batch_and_manager(batch_config, head_size, n_heads_kv, kv_block_size) - - qkv = torch.randn((batch.current_tokens, (n_heads_q + 2 * n_heads_kv) * head_size), - device=device, - dtype=torch.float16) - - kv_cache = state_manager.get_cache(0) - - copy_impl = LinearBlockedKVCopy(head_size, n_heads_q, n_heads_kv, torch.float16) - copy_impl(kv_cache, qkv, batch) - - k = qkv[:, head_size * n_heads_q:head_size * (n_heads_q + n_heads_kv)] - v = qkv[:, head_size * (n_heads_q + n_heads_kv):] - - validate_kv_cache(kv_cache, k, v, seq_descs, batch) diff --git a/tests/unit/inference/kernels/ragged_ops/test_blocked_rotary_emb.py b/tests/unit/inference/kernels/ragged_ops/test_blocked_rotary_emb.py deleted file mode 100644 index 35b92ef86305..000000000000 --- 
a/tests/unit/inference/kernels/ragged_ops/test_blocked_rotary_emb.py +++ /dev/null @@ -1,203 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -from typing import List, Tuple - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.kernels.ragged_ops import BlockedRotaryEmbeddings, BlockedTrainedRotaryEmbeddings -from deepspeed.inference.v2.ragged import RaggedBatchWrapper, DSSequenceDescriptor -from .ragged_testing_utils import build_batch_and_manager, validate_kv_cache -from ...inference_test_utils import allclose -""" -NOTE(cmikeh2): It is very possible to see unit test failures (even on FP16) depending on when -certain values are casted up to or down from float32. If we are seeing accuracy issues, we should -make sure we are aligning on the training implementation's cast pattern here, given these tolerances -tend to be sufficient elsewhere. -""" - - -def rotary_pos_embs(q: torch.Tensor, k: torch.Tensor, seq_descs: List[DSSequenceDescriptor], batch: RaggedBatchWrapper, - head_size: int): - - def make_cos_sin_emb(seq_len: int) -> Tuple[torch.Tensor, torch.Tensor]: - t = torch.arange(seq_len, dtype=torch.float32, device=get_accelerator().current_device()) - inv_freq = (1.0 / (10000.0**(torch.arange( - 0, head_size, 2, dtype=torch.float32, device=get_accelerator().current_device()) / head_size))).half() - - freqs = torch.einsum("i,j->ij", t, inv_freq) - emb = torch.cat((freqs, freqs), dim=-1) - - return torch.cos(emb)[:, None, :], torch.sin(emb)[:, None, :], inv_freq - - def rotate_half(x: torch.Tensor) -> torch.Tensor: - return torch.cat((-x[..., x.shape[-1] // 2:], x[..., :x.shape[-1] // 2]), dim=-1) - - cos, sin, freqs = make_cos_sin_emb(1024) - - q_out = torch.empty_like(q) - k_out = torch.empty_like(k) - n_heads_q = q.shape[1] // head_size - n_heads_kv = k.shape[1] // head_size - - inflight_descs = batch.inflight_seq_descriptors(on_device=False)[:batch.current_sequences] - - if inflight_descs.shape[0] != len(seq_descs): - raise ValueError("The number of sequence descriptors does not match the number of sequences in the batch.") - - for seq_desc, inflight_seq in zip(seq_descs, inflight_descs): - start_idx = inflight_seq[0] - n_tokens = seq_desc.in_flight_tokens - - q_src = q[start_idx:start_idx + n_tokens].reshape(n_tokens, n_heads_q, head_size).float() - k_src = k[start_idx:start_idx + n_tokens].reshape(n_tokens, n_heads_kv, head_size).float() - freq_start_offset = seq_desc.seen_tokens - - cos_chunk = cos[range(freq_start_offset, freq_start_offset + n_tokens)] - sin_chunk = sin[range(freq_start_offset, freq_start_offset + n_tokens)] - - q_emb = q_src * cos_chunk + rotate_half(q_src) * sin_chunk - k_emb = k_src * cos_chunk + rotate_half(k_src) * sin_chunk - - q_out[start_idx:start_idx + n_tokens] = q_emb.reshape(n_tokens, n_heads_q * head_size).to(q_out.dtype) - k_out[start_idx:start_idx + n_tokens] = k_emb.reshape(n_tokens, n_heads_kv * head_size).to(k_out.dtype) - - return q_out, k_out, freqs - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("n_tokens, history_size", [(1, 0), (17, 0), (33, 15), (1, 63)]) -@pytest.mark.parametrize("trained_emb", [False, True]) -def test_single_sequence_single_block(n_tokens: int, history_size: int, trained_emb: bool): - """ - Validate that the copy works correctly - """ - head_size = 64 - n_heads_q = 16 - n_heads_kv = 16 - kv_block_size = 64 - device = get_accelerator().current_device() - - batch, state_manager, seq_descs = 
build_batch_and_manager([(n_tokens, history_size)], head_size, n_heads_kv, - kv_block_size) - - assert batch.current_sequences == 1 - assert batch.current_tokens == n_tokens - - qkv = torch.randn((batch.current_tokens, (n_heads_q + 2 * n_heads_kv) * head_size), - device=device, - dtype=torch.float16) - qkv_ref = qkv.clone() - - q = qkv_ref[:, :head_size * n_heads_q] - k = qkv_ref[:, head_size * n_heads_q:head_size * (n_heads_q + n_heads_kv)] - v = qkv_ref[:, head_size * (n_heads_q + n_heads_kv):] - - q_ref, k, freqs = rotary_pos_embs(q, k, seq_descs, batch, head_size) - freqs = freqs.half() - - kv_cache = state_manager.get_cache(0) - - if trained_emb: - copy_impl = BlockedTrainedRotaryEmbeddings(head_size, n_heads_q, n_heads_kv, torch.float16) - copy_impl(kv_cache, qkv, batch, freqs) - else: - copy_impl = BlockedRotaryEmbeddings(head_size, n_heads_q, n_heads_kv, torch.float16) - copy_impl(kv_cache, qkv, batch) - - assert allclose(qkv[:, :head_size * n_heads_q], q_ref) - validate_kv_cache(kv_cache, k, v, seq_descs, batch, exact=False) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("n_tokens, history_size", [(128, 0), (177, 0), (169, 8), (117, 88)]) -@pytest.mark.parametrize("trained_emb", [False, True]) -def test_single_sequence_multiple_blocks(n_tokens: int, history_size: int, trained_emb: bool): - """ - Validate that the copy works correctly - """ - head_size = 64 - n_heads_q = 16 - n_heads_kv = 16 - kv_block_size = 64 - device = get_accelerator().current_device() - - batch, state_manager, seq_descs = build_batch_and_manager([(n_tokens, history_size)], head_size, n_heads_kv, - kv_block_size) - - assert batch.current_sequences == 1 - assert batch.current_tokens == n_tokens - - qkv = torch.randn((batch.current_tokens, (n_heads_q + 2 * n_heads_kv) * head_size), - device=device, - dtype=torch.float16) - qkv_ref = qkv.clone() - - q = qkv_ref[:, :head_size * n_heads_q] - k = qkv_ref[:, head_size * n_heads_q:head_size * (n_heads_q + n_heads_kv)] - v = qkv_ref[:, head_size * (n_heads_q + n_heads_kv):] - - q_ref, k, freqs = rotary_pos_embs(q, k, seq_descs, batch, head_size) - freqs = freqs.half() - - kv_cache = state_manager.get_cache(0) - - if trained_emb: - copy_impl = BlockedTrainedRotaryEmbeddings(head_size, n_heads_q, n_heads_kv, torch.float16) - copy_impl(kv_cache, qkv, batch, freqs) - else: - copy_impl = BlockedRotaryEmbeddings(head_size, n_heads_q, n_heads_kv, torch.float16) - copy_impl(kv_cache, qkv, batch) - - assert allclose(qkv[:, :head_size * n_heads_q], q_ref) - validate_kv_cache(kv_cache, k, v, seq_descs, batch, exact=False) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("trained_emb", [False, True]) -def test_multi_sequences(trained_emb: bool) -> None: - head_size = 64 - n_heads_q = 16 - n_heads_kv = 16 - kv_block_size = 64 - device = get_accelerator().current_device() - - batch_config = [ - (128, 0), - (177, 0), - (169, 8), - (117, 88), - (1, 293), - (1, 733), - (1, 33), - ] - - batch, state_manager, seq_descs = build_batch_and_manager(batch_config, head_size, n_heads_kv, kv_block_size) - - qkv = torch.randn((batch.current_tokens, (n_heads_q + 2 * n_heads_kv) * head_size), - device=device, - dtype=torch.float16) - qkv_ref = qkv.clone() - - q = qkv_ref[:, :head_size * n_heads_q] - k = qkv_ref[:, head_size * n_heads_q:head_size * (n_heads_q + n_heads_kv)] - v = qkv_ref[:, head_size * (n_heads_q + n_heads_kv):] - - q_ref, k, freqs = rotary_pos_embs(q, k, seq_descs, batch, head_size) - freqs = freqs.half() - - kv_cache = state_manager.get_cache(0) - - if 
trained_emb: - copy_impl = BlockedTrainedRotaryEmbeddings(head_size, n_heads_q, n_heads_kv, torch.float16) - copy_impl(kv_cache, qkv, batch, freqs) - else: - copy_impl = BlockedRotaryEmbeddings(head_size, n_heads_q, n_heads_kv, torch.float16) - copy_impl(kv_cache, qkv, batch) - - assert allclose(qkv[:, :head_size * n_heads_q], q_ref) - validate_kv_cache(kv_cache, k, v, seq_descs, batch, exact=False) diff --git a/tests/unit/inference/kernels/ragged_ops/test_logits_gather.py b/tests/unit/inference/kernels/ragged_ops/test_logits_gather.py deleted file mode 100644 index 0208b733ab5b..000000000000 --- a/tests/unit/inference/kernels/ragged_ops/test_logits_gather.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -from typing import List - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.kernels.ragged_ops import RaggedLogitsGather -from ...inference_test_utils import allclose, get_dtypes -from .ragged_testing_utils import build_simple_batch - - -def baseline_implementation(hidden_states: torch.Tensor, seq_lens: List[int]) -> torch.Tensor: - output = torch.empty((len(seq_lens), hidden_states.shape[1]), - dtype=hidden_states.dtype, - device=hidden_states.device) - - offset = 0 - for i, seq_len in enumerate(seq_lens): - output[i] = hidden_states[offset + seq_len - 1] - offset += seq_len - - return output - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize('dtype', get_dtypes()) -def test_supported_dtypes(dtype: torch.dtype) -> None: - """ - Validate support on nominally supported data types. - """ - model_dim = 4096 - - batch = build_simple_batch([256], padding=False) - hidden_states = torch.randn((batch.current_tokens, model_dim), - dtype=dtype, - device=get_accelerator().current_device()) - - reference_result = baseline_implementation(hidden_states, [256]) - - kernel = RaggedLogitsGather(model_dim, dtype) - output = torch.empty_like(reference_result) - kernel(output, hidden_states, batch) - - assert allclose(output, reference_result) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize('seq_lens', [[128, 64, 192, 32], [57, 112, 63, 89, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1], - [63, 27, 74, 83, 32, 17, 1, 1, 1, 1, 1]]) -def test_multiple_sequences(seq_lens: List[int]) -> None: - """ - Validate support on more multi-sequence inputs. - """ - model_dim = 4096 - dtype = torch.float16 - - batch = build_simple_batch(seq_lens, padding=False) - hidden_states = torch.randn((batch.current_tokens, model_dim), - dtype=dtype, - device=get_accelerator().current_device()) - - reference_result = baseline_implementation(hidden_states, seq_lens) - - kernel = RaggedLogitsGather(model_dim, dtype) - output = torch.empty_like(reference_result) - kernel(output, hidden_states, batch) - - assert allclose(output, reference_result) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("model_dim", [1024, 6144, 6784]) -def test_problem_size_permutations(model_dim: int) -> None: - """ - Validate for different embedding sizes. 
- """ - dtype = torch.float16 - seq_lens = [128, 64, 192, 32] - - batch = build_simple_batch(seq_lens, padding=False) - hidden_states = torch.randn((batch.current_tokens, model_dim), - dtype=dtype, - device=get_accelerator().current_device()) - - reference_result = baseline_implementation(hidden_states, seq_lens) - - kernel = RaggedLogitsGather(model_dim, dtype) - output = torch.empty_like(reference_result) - kernel(output, hidden_states, batch) - - assert allclose(output, reference_result) diff --git a/tests/unit/inference/kernels/ragged_ops/test_moe_gather.py b/tests/unit/inference/kernels/ragged_ops/test_moe_gather.py deleted file mode 100644 index 5fa375b49c19..000000000000 --- a/tests/unit/inference/kernels/ragged_ops/test_moe_gather.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.inference_utils import DtypeEnum -from deepspeed.inference.v2.kernels.ragged_ops import ( - MoEGather, - MoEScatter, - RaggedTop1Gating, -) -from .ragged_testing_utils import build_simple_batch -""" -For simplicity's sake, these tests do rely on ``RaggedTop1Gating`` and -``MoEScatter`` to produce correct inputs. If either of these kernels is broken -these tests will fail, so double check the unit test results there before -debugging here. -""" - - -def build_inputs(n_tokens, n_experts, do_padding): - - assert n_tokens <= 2048, "This test will break if n_tokens > 2048" - - # Sequence composition shouldn't matter here - batch = build_simple_batch([n_tokens], padding=do_padding) - - logits = torch.randn((batch.tensor_toks, n_experts), - dtype=torch.float16, - device=get_accelerator().current_device()) - - # This will make each token's value equal to its index. NOTE: This will break for - # tokens with index > 2048. 
- hidden_states = torch.arange(batch.tensor_toks, dtype=torch.float16, - device=get_accelerator().current_device()).repeat_interleave(4096, dim=0).reshape( - batch.tensor_toks, 4096).contiguous() - - gate = RaggedTop1Gating(DtypeEnum.fp16) - - # Gating outputs - expert_counts = torch.zeros((n_experts, ), dtype=torch.int32, device=get_accelerator().current_device()) - scores = torch.empty((batch.tensor_toks, ), dtype=torch.float32, device=get_accelerator().current_device()) - expert_assignment = torch.empty((batch.tensor_toks, ), - dtype=torch.int32, - device=get_accelerator().current_device()) - expert_offset = torch.empty((batch.tensor_toks, ), dtype=torch.int32, device=get_accelerator().current_device()) - - gate(expert_counts, scores, expert_assignment, expert_offset, logits, batch) - - # Scatter outputs - moe_input = torch.empty((batch.tensor_toks, 4096), dtype=torch.float16, device=get_accelerator().current_device()) - expert_cumsum = torch.empty((n_experts, ), dtype=torch.int64, device=get_accelerator().current_device()) - mapped_slots = torch.empty((batch.tensor_toks, ), dtype=torch.int32, device=get_accelerator().current_device()) - - scatter = MoEScatter(DtypeEnum.fp16, 4096) - scatter(moe_input, expert_cumsum, mapped_slots, hidden_states, expert_counts, expert_assignment, expert_offset) - - return batch, moe_input, scores, mapped_slots, expert_counts - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("n_tokens, n_experts", [(13, 64), (278, 64), (1977, 64)]) -@pytest.mark.parametrize("do_padding", [True, False]) -def test_moe_gather(n_tokens, n_experts, do_padding): - - batch, moe_input, scores, mapped_slots, expert_counts = build_inputs(n_tokens, n_experts, do_padding) - - output = torch.randn((batch.tensor_toks, 4096), dtype=torch.float16, device=get_accelerator().current_device()) - - gather = MoEGather(DtypeEnum.fp16, 4096) - gather(output, moe_input, scores, mapped_slots, expert_counts) - - for token_idx in range(n_tokens): - assert torch.equal( - output[token_idx], - torch.full((4096, ), - token_idx * scores[token_idx], - dtype=torch.float16, - device=get_accelerator().current_device())) diff --git a/tests/unit/inference/kernels/ragged_ops/test_moe_scatter.py b/tests/unit/inference/kernels/ragged_ops/test_moe_scatter.py deleted file mode 100644 index 4ca051410c1c..000000000000 --- a/tests/unit/inference/kernels/ragged_ops/test_moe_scatter.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.inference_utils import DtypeEnum -from deepspeed.inference.v2.kernels.ragged_ops import MoEScatter, RaggedTop1Gating -from .ragged_testing_utils import build_simple_batch -""" -For simplicity's sake, these tests do rely on ``RaggedTop1Gating`` to produce correct -inputs. If ``RaggedTop1Gating`` is broken, these tests will fail, so double check -the unit test results there before debugging here. 
-""" - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("n_tokens, n_experts", [(13, 64), (278, 64), (1977, 64)]) -@pytest.mark.parametrize("do_padding", [True, False]) -def test_moe_scatter(n_tokens, n_experts, do_padding): - - # Sequence composition shouldn't matter here - batch = build_simple_batch([n_tokens], padding=do_padding) - - logits = torch.randn((batch.tensor_toks, n_experts), - dtype=torch.float16, - device=get_accelerator().current_device()) - - # This will make each token's value equal to its index. NOTE: This will break for - # tokens with index > 2048. - hidden_states = torch.arange(batch.tensor_toks, dtype=torch.float16, - device=get_accelerator().current_device()).repeat_interleave(4096, dim=0).reshape( - batch.tensor_toks, 4096).contiguous() - - gate = RaggedTop1Gating(DtypeEnum.fp16) - - # Gating outputs - expert_counts = torch.zeros((n_experts, ), dtype=torch.int32, device=get_accelerator().current_device()) - scores = torch.empty((batch.tensor_toks, ), dtype=torch.float32, device=get_accelerator().current_device()) - expert_assignment = torch.empty((batch.tensor_toks, ), - dtype=torch.int32, - device=get_accelerator().current_device()) - expert_offset = torch.empty((batch.tensor_toks, ), dtype=torch.int32, device=get_accelerator().current_device()) - - gate(expert_counts, scores, expert_assignment, expert_offset, logits, batch) - - # Scatter outputs - moe_input = torch.empty((batch.tensor_toks, 4096), dtype=torch.float16, device=get_accelerator().current_device()) - expert_cumsum = torch.empty((n_experts, ), dtype=torch.int64, device=get_accelerator().current_device()) - mapped_slots = torch.empty((batch.tensor_toks, ), dtype=torch.int32, device=get_accelerator().current_device()) - - scatter = MoEScatter(DtypeEnum.fp16, 4096) - scatter(moe_input, expert_cumsum, mapped_slots, hidden_states, expert_counts, expert_assignment, expert_offset) - assert torch.equal(expert_cumsum, torch.cumsum(expert_counts, dim=0).to(torch.int64)) - - for token_idx in range(batch.tensor_toks): - if token_idx < n_tokens: - expert_idx = expert_assignment[token_idx].item() - if expert_idx == 0: - expert_cumsum_val = 0 - else: - expert_cumsum_val = expert_cumsum[expert_idx - 1] - offset = expert_offset[token_idx] - total_offset = offset + expert_cumsum_val - - assert total_offset == mapped_slots[token_idx].item() - assert torch.equal(moe_input[total_offset], hidden_states[token_idx]) - else: - assert mapped_slots[token_idx].item() == -1 - - assert expert_cumsum[-1] == n_tokens diff --git a/tests/unit/inference/kernels/ragged_ops/test_ragged_embed.py b/tests/unit/inference/kernels/ragged_ops/test_ragged_embed.py deleted file mode 100644 index 94f3f143274e..000000000000 --- a/tests/unit/inference/kernels/ragged_ops/test_ragged_embed.py +++ /dev/null @@ -1,177 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -from typing import List, Optional, Tuple - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.kernels.ragged_ops import RaggedEmbeddingKernel -from ...inference_test_utils import allclose, get_dtypes -from .ragged_testing_utils import build_batch_and_manager - - -def baseline_implementation(token_ids: torch.Tensor, - embedding_table: torch.Tensor, - unpadded_size: int, - positional_embedding_table: Optional[torch.Tensor] = None, - positional_ids: Optional[torch.Tensor] = None) -> torch.Tensor: - """ - Baseline implementation for our ragged embedding kernel. 
- """ - if unpadded_size == token_ids.shape[0]: - token_embed = torch.nn.functional.embedding(token_ids, embedding_table) - - if positional_embedding_table is not None: - pos_embed = torch.nn.functional.embedding(positional_ids, positional_embedding_table) - token_embed += pos_embed - return token_embed - else: - real_token_ids = token_ids[:unpadded_size] - output = torch.empty((token_ids.shape[0], embedding_table.shape[1]), - dtype=embedding_table.dtype, - device=get_accelerator().current_device()) - unpadded_output = torch.nn.functional.embedding(real_token_ids, embedding_table) - - # Positional embeddings aren't padded because it's simulated - if positional_embedding_table is not None: - pos_embed = torch.nn.functional.embedding(positional_ids, positional_embedding_table) - unpadded_output += pos_embed - - output[:unpadded_size] = unpadded_output - return output - - -def _ragged_embed_test_helper(sequence_config: List[Tuple[int, int]], - embed_dtype: torch.dtype, - token_dtype: torch.dtype, - embed_dim: int, - vocab_size: int, - do_padding: bool = False, - pos_embed_size: int = -1, - pos_embed_offset: int = 0) -> None: - """ - Helper for embedding test to limit the number of tests to run. - - Params: - embed_dim (int): Model dimension - vocab_size (int): Leading dimension on embedding weight - pos_embed_size (int): Size of positional embedding. If negative, no positional embedding - is used. - pos_embed_offset (int): Offset for positional embedding. Effectively, the raw offsets - of a token into a sequence are offset by this amount into the embedding matrix. ( - i.e. the shape of the positional embeddings is (pos_embed_size + pos_embed_offset - embed_dim) - """ - device = get_accelerator().current_device() - - # Heads/Block size are irrelevant here but need something. - batch, _, _, = build_batch_and_manager(sequence_config, 64, 16, 64, vocab_range=vocab_size, padding=do_padding) - - embedding_table = torch.randn((vocab_size, embed_dim), dtype=embed_dtype, device=device) - - if pos_embed_size > 0: - pos_embedding_table = torch.randn((pos_embed_size + pos_embed_offset, embed_dim), - dtype=embed_dtype, - device=device) - positional_ids = torch.cat([ - torch.arange(start_idx, start_idx + seq_len, dtype=token_dtype, device=device) - for seq_len, start_idx in sequence_config - ]) + pos_embed_offset - else: - pos_embedding_table = None - positional_ids = None - - baseline_output = baseline_implementation(batch.input_ids().to(token_dtype), embedding_table, batch.current_tokens, - pos_embedding_table, positional_ids) - - kernel = RaggedEmbeddingKernel(embed_dtype, token_dtype, embed_dim) - output = torch.empty_like(baseline_output) - - kernel(output, - batch, - embedding_table, - position_embed_weight=pos_embedding_table, - position_embed_offset=pos_embed_offset) - - if do_padding: - assert output.shape[0] != batch.current_tokens - - assert allclose(output[:batch.current_tokens], baseline_output[:batch.current_tokens]) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize('token_dtype', [torch.int32, torch.int64]) -@pytest.mark.parametrize('embed_dtype', get_dtypes()) -def test_dtype_permutations(token_dtype: torch.dtype, embed_dtype: torch.dtype) -> None: - """ - Validate (on a single problem size) that the kernel support for different data types - is correct. 
- """ - embed_dim = 4096 - vocab_size = 50304 - - _ragged_embed_test_helper([(256, 0)], embed_dtype, token_dtype, embed_dim, vocab_size) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize('vocab_size, embed_dim', [(1024, 1024), (32000, 5120), (50304, 6144)]) -def test_problem_size_permutations(vocab_size: int, embed_dim: int) -> None: - """ - Validate on wider range of problem sizes. - """ - - _ragged_embed_test_helper([(256, 0)], torch.float16, torch.int32, embed_dim, vocab_size) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize('seq_lens', [[128, 64, 192, 32], [57, 112, 63, 89, 1, 1, 1, 1]]) -@pytest.mark.parametrize('do_padding', [True, False]) -def test_complex_sequences(seq_lens: List[int], do_padding: bool) -> None: - """ - Validate on different ragged batch construction scenarios. - """ - embed_dim = 4096 - vocab_size = 50304 - - _ragged_embed_test_helper([(seq_len, 0) for seq_len in seq_lens], - torch.float16, - torch.int32, - embed_dim, - vocab_size, - do_padding=do_padding) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("seq_lens", [[(256, 0)], [(256, 0), - (128, 0)], [(256, 0), (128, 0), - (64, 0)], [(1, 877), (619, 0), (213, 372), (1, 45)]]) -def test_positional_embedding(seq_lens: List[Tuple[int, int]]) -> None: - """ - Validate that positional embedding works correctly. - """ - embed_dim = 4096 - vocab_size = 50304 - - _ragged_embed_test_helper(seq_lens, torch.float16, torch.int32, embed_dim, vocab_size, pos_embed_size=2048) - - -@pytest.mark.inference_v2_ops -def test_positional_embedding_offset() -> None: - """ - Validate that positional embedding works correctly with an offset. - """ - embed_dim = 4096 - vocab_size = 50304 - seq_config = [(1, 877), (619, 0), (213, 372), (1, 45)] - - _ragged_embed_test_helper(seq_config, - torch.float16, - torch.int32, - embed_dim, - vocab_size, - pos_embed_size=2048, - pos_embed_offset=2) diff --git a/tests/unit/inference/kernels/ragged_ops/test_top_1_gating.py b/tests/unit/inference/kernels/ragged_ops/test_top_1_gating.py deleted file mode 100644 index 96bf28eea7ad..000000000000 --- a/tests/unit/inference/kernels/ragged_ops/test_top_1_gating.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright (c) Microsoft Corporation. 
-# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import pytest -import torch -import torch.nn.functional as F - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.inference_utils import DtypeEnum -from deepspeed.inference.v2.kernels.ragged_ops import RaggedTop1Gating -from .ragged_testing_utils import build_simple_batch -from ...inference_test_utils import allclose - - -def _test_single_mapping_helper(n_tokens: int, - n_experts: int, - assigned_expert: int, - logit_fill: float = 0.0, - match_fill: float = 1.0) -> None: - logits = torch.full((n_tokens, n_experts), - logit_fill, - dtype=torch.float16, - device=get_accelerator().current_device()) - - logits[:, assigned_expert] = match_fill - - gate = RaggedTop1Gating(DtypeEnum.fp16) - - expert_counts = torch.zeros((n_experts, ), dtype=torch.int32, device=get_accelerator().current_device()) - scores = torch.empty((n_tokens, ), dtype=torch.float32, device=get_accelerator().current_device()) - expert_assignment = torch.empty((n_tokens, ), dtype=torch.int32, device=get_accelerator().current_device()) - expert_offset = torch.empty((n_tokens, ), dtype=torch.int32, device=get_accelerator().current_device()) - batch = build_simple_batch([n_tokens], padding=False) - - gate(expert_counts, scores, expert_assignment, expert_offset, logits, batch) - - assert expert_counts[assigned_expert] == n_tokens - assert torch.all(expert_assignment == assigned_expert) - assert torch.unique(expert_offset).shape[0] == n_tokens - assert allclose(scores, F.softmax(logits.float(), dim=1)[:, assigned_expert]) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize('n_tokens, n_experts', [(1, 16), (17, 16), (32, 128), (89, 128), (433, 128)]) -def test_single_mapping_gating(n_tokens: int, n_experts: int) -> None: - """ - Evaluate our expert stacking behavior in complete isolation. This ensures all tokens - mapped to the same expert are getting unique offsets and identical scores. - """ - assigned_expert = 13 - _test_single_mapping_helper(n_tokens, n_experts, assigned_expert) - - -@pytest.mark.inference_v2_ops -def test_negative_logits(): - """ - Ensure that scores/values are propagated correctly when all the logits are negative. An - earlier implementation of the scoring would return NaN for this case. - """ - _test_single_mapping_helper(128, 32, 13, logit_fill=-2.0, match_fill=-1.0) - - -@pytest.mark.inference_v2_ops -def test_determinism(): - """ - Ensure that ties between two logits are broken deterministically. This is essential when - the gating is distributed across multiple devices that need to map the same token to - the same expert. 
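Concretely, experts 19 and 26 are given identical logits below, and every one of the
1024 trials is expected to assign all tokens to expert 19.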
- """ - - n_tokens = 512 - n_experts = 64 - - logits = torch.zeros((n_tokens, n_experts), dtype=torch.float16, device=get_accelerator().current_device()) - batch = build_simple_batch([n_tokens], padding=False) - - logits[:, 19] = 1.0 - logits[:, 26] = 1.0 - - gate = RaggedTop1Gating(DtypeEnum.fp16) - - for _ in range(1024): - expert_counts = torch.zeros((n_experts, ), dtype=torch.int32, device=get_accelerator().current_device()) - scores = torch.empty((n_tokens, ), dtype=torch.float32, device=get_accelerator().current_device()) - expert_assignment = torch.empty((n_tokens, ), dtype=torch.int32, device=get_accelerator().current_device()) - expert_offset = torch.empty((n_tokens, ), dtype=torch.int32, device=get_accelerator().current_device()) - batch = build_simple_batch([n_tokens], padding=False) - - gate(expert_counts, scores, expert_assignment, expert_offset, logits, batch) - - assert expert_counts[19] == n_tokens - assert expert_counts[26] == 0 - assert torch.all(expert_assignment == 19) - assert torch.unique(expert_offset).shape[0] == n_tokens - assert allclose(scores, F.softmax(logits.float(), dim=1)[:, 19]) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize('n_tokens, n_experts', [(1, 16), (17, 16), (32, 128), (89, 128), (433, 2)]) -def test_score_accuracy(n_tokens: int, n_experts: int) -> None: - """ - Validate expert scores are correct. - """ - logits = torch.randn((n_tokens, n_experts), dtype=torch.float16, device=get_accelerator().current_device()) - batch = build_simple_batch([n_tokens], padding=False) - - gate = RaggedTop1Gating(DtypeEnum.fp16) - - expert_counts = torch.zeros((n_experts, ), dtype=torch.int32, device=get_accelerator().current_device()) - scores = torch.empty((n_tokens, ), dtype=torch.float32, device=get_accelerator().current_device()) - expert_assignment = torch.empty((n_tokens, ), dtype=torch.int32, device=get_accelerator().current_device()) - expert_offset = torch.empty((n_tokens, ), dtype=torch.int32, device=get_accelerator().current_device()) - - ref_scores = F.softmax(logits.float(), dim=1).max(dim=1).values - - gate(expert_counts, scores, expert_assignment, expert_offset, logits, batch) - assert allclose(scores, ref_scores) - assert expert_counts.sum() == n_tokens diff --git a/tests/unit/inference/model_implementations/__init__.py b/tests/unit/inference/model_implementations/__init__.py deleted file mode 100644 index 208299fb8c50..000000000000 --- a/tests/unit/inference/model_implementations/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team diff --git a/tests/unit/inference/model_implementations/parameters/__init__.py b/tests/unit/inference/model_implementations/parameters/__init__.py deleted file mode 100644 index 208299fb8c50..000000000000 --- a/tests/unit/inference/model_implementations/parameters/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team diff --git a/tests/unit/inference/model_implementations/parameters/test_layer_inheritance.py b/tests/unit/inference/model_implementations/parameters/test_layer_inheritance.py deleted file mode 100644 index 20803e53a320..000000000000 --- a/tests/unit/inference/model_implementations/parameters/test_layer_inheritance.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) Microsoft Corporation. 
-# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import pytest -import torch - -from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer - -from .utils import validate_device, SimpleParam, DummyInferenceModel - - -class ParentLayer(LayerContainer): - """ - A layer that has a dependency on a simple parameter. - """ - - param_1: SimpleParam - - -class ChildLayer(ParentLayer): - """ - A layer that inherits from another layer. - """ - - param_2: SimpleParam - - -@pytest.mark.inference_v2 -def test_layer_inheritance(): - inference_model = DummyInferenceModel() - - multi_param_layer = ChildLayer(inference_model) - - assert multi_param_layer.n_params == 2 - assert multi_param_layer.is_initialized is False - - multi_param_layer.param_1.param = torch.ones(16, 16) - - assert multi_param_layer.is_initialized is False - - multi_param_layer.param_2.param = torch.full((16, 16), 2.0) - - assert multi_param_layer.is_initialized is True - assert isinstance(multi_param_layer.param_1, torch.Tensor) - assert isinstance(multi_param_layer.param_2, torch.Tensor) - - validate_device(multi_param_layer.param_1) - validate_device(multi_param_layer.param_2) diff --git a/tests/unit/inference/model_implementations/parameters/test_mapping.py b/tests/unit/inference/model_implementations/parameters/test_mapping.py deleted file mode 100644 index 3c74d7a0479a..000000000000 --- a/tests/unit/inference/model_implementations/parameters/test_mapping.py +++ /dev/null @@ -1,165 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import pytest -import torch - -from deepspeed.inference.v2.allocator import on_device -from deepspeed.inference.v2.model_implementations.parameter_base import ParameterBase, ParamList -from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer - - -class MultiDependencyContainer(ParameterBase): - - dependency_1: torch.Tensor - - dependency_2: torch.Tensor - - @on_device - def finalize(self) -> torch.Tensor: - return torch.cat([self.dependency_1, self.dependency_2]) - - -class ListDependencyContainer(ParameterBase): - - dependencies: ParamList("list_items") # noqa: F821 - - @on_device - def finalize(self) -> torch.Tensor: - return torch.cat(tuple(self.dependencies)) - - -class MappingLayer(LayerContainer): - PARAM_MAPPING = { - "model.val.item.d_1": "multi_depend.dependency_1", - "model.val.item.d_2": "multi_depend.dependency_2", - "model.list_vals.*.d": "list_depend.dependencies" - } - - multi_depend: MultiDependencyContainer - - list_depend: ListDependencyContainer - - -class SubMappingLayer(MappingLayer): - PARAM_MAPPING = { - "model.val.item2.d_1": "multi_depend2.dependency_1", - "model.val.item2.d_2": "multi_depend2.dependency_2", - } - - multi_depend2: MultiDependencyContainer - - -class DoubleMappingLayer(LayerContainer): - PARAM_MAPPING = { - "model.val.item.d_1": ["multi_depend.dependency_1", "multi_depend.dependency_2"], - } - - multi_depend: MultiDependencyContainer - - -class InferenceModel: - - @property - def list_items(self) -> int: - return 16 - - -@pytest.mark.inference_v2 -def test_mapping_syntax(): - model = InferenceModel() - - mapping_layer = MappingLayer(model) - - mapping_layer.set_dependency("model.val.item.d_1", torch.ones(1)) - mapping_layer.set_dependency("model.val.item.d_2", torch.ones(1) * 2) - - assert isinstance(mapping_layer.multi_depend, torch.Tensor) - - for i in range(16): - mapping_layer.set_dependency(f"model.list_vals.{i}.d", 
torch.ones(1) * i) - if i != 16 - 1: - assert mapping_layer.is_initialized == False - - assert isinstance(mapping_layer.list_depend, torch.Tensor) - assert mapping_layer.is_initialized == True - - -@pytest.mark.inference_v2 -def test_sub_mapping_syntax(): - model = InferenceModel() - - mapping_layer = SubMappingLayer(model) - - mapping_layer.set_dependency("model.val.item.d_1", torch.ones(1)) - mapping_layer.set_dependency("model.val.item.d_2", torch.ones(1) * 2) - - assert isinstance(mapping_layer.multi_depend, torch.Tensor) - - mapping_layer.set_dependency("model.val.item2.d_1", torch.ones(1)) - mapping_layer.set_dependency("model.val.item2.d_2", torch.ones(1) * 2) - - assert isinstance(mapping_layer.multi_depend2, torch.Tensor) - - # We want to check into double digits to make sure that this isn't specific - # to single difit indexing. - for i in range(16): - mapping_layer.set_dependency(f"model.list_vals.{i}.d", torch.ones(1) * i) - if i != 16 - 1: - assert mapping_layer.is_initialized == False - - assert isinstance(mapping_layer.list_depend, torch.Tensor) - assert mapping_layer.is_initialized == True - - -@pytest.mark.inference_v2 -def test_double_mapping_syntax(): - model = InferenceModel() - - mapping_layer = DoubleMappingLayer(model) - mapping_layer.set_dependency("model.val.item.d_1", torch.ones(1)) - - # The single parameter setting should immediately make the parameter finalized - # and the whole layer initialized. - assert isinstance(mapping_layer.multi_depend, torch.Tensor) - assert mapping_layer.is_initialized == True - - -@pytest.mark.inference_v2 -def test_insufficient_mapping_syntax(): - """ - In the above example, we don't have a mapping for `multi_depend2.dependency_2`. - """ - - with pytest.raises(ValueError): - - class InsuffienctMappingLayer(LayerContainer): - PARAM_MAPPING = { - "model.val.item.d_1": "multi_depend1.dependency_1", - "model.val.item.d_2": "multi_depend1.dependency_2", - "model.val.item2.d_1": "multi_depend2.dependency_1", - } - - multi_depend1: MultiDependencyContainer - - multi_depend2: MultiDependencyContainer - - -@pytest.mark.inference_v2 -def test_unknown_target_mapping_syntax(): - """ - In the above example, `multi_depend_unknown` does not exist - """ - - with pytest.raises(ValueError): - - class UnknownTargetMappingLayer(LayerContainer): - PARAM_MAPPING = { - "model.val.item.d_1": "multi_depend1.dependency_1", - "model.val.item.d_2": "multi_depend1.dependency_2", - "model.val.item2.d_1": "multi_depend_unknown.dependency_1", - } - - multi_depend: MultiDependencyContainer diff --git a/tests/unit/inference/model_implementations/parameters/test_multi_parameter_layer.py b/tests/unit/inference/model_implementations/parameters/test_multi_parameter_layer.py deleted file mode 100644 index 6bfc04e97c30..000000000000 --- a/tests/unit/inference/model_implementations/parameters/test_multi_parameter_layer.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import pytest -import torch - -from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer - -from .utils import validate_device, SimpleParam, ListParam, DummyInferenceModel - - -class MultiParameterLayer(LayerContainer): - """ - Two dependencies, both of which are simple parameters. - """ - - param_1: SimpleParam - - param_2: SimpleParam - - -class MixedMultiParameterLayer(LayerContainer): - """ - Two dependencies, one of which is a simple parameter, the other is a list parameter. 
- """ - - param_1: SimpleParam - - param_2: ListParam - - -@pytest.mark.inference_v2 -def test_multi_parameter_layer(): - inference_model = DummyInferenceModel() - - multi_param_layer = MultiParameterLayer(inference_model) - - assert multi_param_layer.n_params == 2 - assert multi_param_layer.is_initialized is False - - multi_param_layer.param_1.param = torch.ones(16, 16) - - assert multi_param_layer.is_initialized is False - - multi_param_layer.param_2.param = torch.full((16, 16), 2.0) - - assert multi_param_layer.is_initialized is True - assert isinstance(multi_param_layer.param_1, torch.Tensor) - assert isinstance(multi_param_layer.param_2, torch.Tensor) - - validate_device(multi_param_layer.param_1) - validate_device(multi_param_layer.param_2) - - -@pytest.mark.inference_v2 -def test_mixed_multi_parameter_layer(): - inference_model = DummyInferenceModel() - - mixed_multi_param_layer = MixedMultiParameterLayer(inference_model) - - assert mixed_multi_param_layer.n_params == 2 - assert mixed_multi_param_layer.is_initialized is False - - mixed_multi_param_layer.param_2.params[1] = torch.full((16, 16), 2.0) - assert mixed_multi_param_layer.is_initialized is False - assert not isinstance(mixed_multi_param_layer.param_2, torch.Tensor) - - mixed_multi_param_layer.param_1.param = torch.ones(16, 16) - assert mixed_multi_param_layer.is_initialized is False - assert isinstance(mixed_multi_param_layer.param_1, torch.Tensor) - - validate_device(mixed_multi_param_layer.param_1) - - mixed_multi_param_layer.param_2.params[0] = torch.full((16, 16), 2.0) - - assert mixed_multi_param_layer.is_initialized is True - assert isinstance(mixed_multi_param_layer.param_2, torch.Tensor) - - validate_device(mixed_multi_param_layer.param_2) - - -class NoCopyInferenceModel: - - @property - def num_dependencies(self) -> int: - return 2 - - def transform(self, param: torch.Tensor) -> torch.Tensor: - return param - - -@pytest.mark.inference_v2 -def test_device_validation(): - inference_model = NoCopyInferenceModel() - - multi_param_layer = MultiParameterLayer(inference_model) - - assert multi_param_layer.n_params == 2 - assert multi_param_layer.is_initialized is False - - multi_param_layer.param_1.param = torch.ones(16, 16) - - assert multi_param_layer.is_initialized is False - - multi_param_layer.param_2.param = torch.full((16, 16), 2.0) - - with pytest.raises(RuntimeError): - # NoCopyInference model did not copy the parameters, so the device validation should fail. - assert multi_param_layer.is_initialized is True diff --git a/tests/unit/inference/model_implementations/parameters/test_parameter_list.py b/tests/unit/inference/model_implementations/parameters/test_parameter_list.py deleted file mode 100644 index 42edd90595fa..000000000000 --- a/tests/unit/inference/model_implementations/parameters/test_parameter_list.py +++ /dev/null @@ -1,104 +0,0 @@ -# Copyright (c) Microsoft Corporation. 
-# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import pytest -import torch - -from deepspeed.inference.v2.model_implementations.parameter_base import ParameterBase, ParamList -from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer -from deepspeed.inference.v2.model_implementations.common_parameters import * -from deepspeed.inference.v2.allocator import on_device - -from .utils import validate_device - - -class SimpleMoELayer(LayerContainer): - - moe_mlp_1: UnfusedMoEMLP1Parameter - - -class DummyInferenceModel: - - def __init__(self, experts_per_rank: int) -> None: - self._num_experts = experts_per_rank - - @property - def num_experts(self) -> int: - return self._num_experts - - @on_device - def transform_moe_mlp_1_param(self, param: torch.Tensor) -> torch.Tensor: - return param - - -@pytest.mark.inference_v2 -def test_simple_moe_layer(): - - inference_model = DummyInferenceModel(experts_per_rank=2) - - simple_moe_layer = SimpleMoELayer(inference_model) - - assert simple_moe_layer.moe_mlp_1.experts[0] is None - assert simple_moe_layer.moe_mlp_1.experts[1] is None - - # Set the first expert - simple_moe_layer.moe_mlp_1.experts[0] = torch.zeros(16, 16) - - assert simple_moe_layer.moe_mlp_1.experts[0] is not None - assert simple_moe_layer.moe_mlp_1.experts[1] is None - - assert not simple_moe_layer.is_initialized - - # Set the second expert - simple_moe_layer.moe_mlp_1.experts[1] = torch.ones(16, 16) - - # We have all the experts, so the layer should be initialized - assert simple_moe_layer.is_initialized - assert isinstance(simple_moe_layer.moe_mlp_1, torch.Tensor) - - validate_device(simple_moe_layer.moe_mlp_1) - - -""" -Check that we can mix the number of elements in lists in the same context and have that -be tracked correctly. -""" - - -class CustomListParam1(ParameterBase): - - deps: ParamList("attr_1") - - -class CustomListParam2(ParameterBase): - - deps: ParamList("attr_2") - - -class MixedLayer(LayerContainer): - - list_1: CustomListParam1 - list_2: CustomListParam2 - - -class MixedInferenceModel: - - @property - def attr_1(self) -> int: - return 1 - - @property - def attr_2(self) -> int: - return 2 - - -@pytest.mark.inference_v2 -def test_mixed_param_lists(): - model = MixedInferenceModel() - - layer = MixedLayer(model) - - assert layer.list_1.deps.n_params == 1 - assert layer.list_2.deps.n_params == 2 diff --git a/tests/unit/inference/model_implementations/parameters/utils.py b/tests/unit/inference/model_implementations/parameters/utils.py deleted file mode 100644 index 0d2cbb27d40e..000000000000 --- a/tests/unit/inference/model_implementations/parameters/utils.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.allocator import on_device -from deepspeed.inference.v2.model_implementations.parameter_base import ParameterBase, ParametrizedList - - -class SimpleParam(ParameterBase): - """ - Parameter with single dependency. - """ - - param: torch.Tensor - - def finalize(self) -> torch.Tensor: - return self.inference_model.transform(self.param) - - -class SimpleParametrizedList(ParametrizedList): - """ - Parameter list based on `num_dependencies` attribute. - """ - - count_attr: str = "num_dependencies" - - -class ListParam(ParameterBase): - """ - Parameter with list dependency. 
- - NOTE: This uses the tuple workaround for the `ParametrizedList` class - as described in the docstring of `ParametrizedList`. - """ - - params: SimpleParametrizedList - - def finalize(self) -> torch.Tensor: - return self.inference_model.transform(torch.cat(tuple(self.params))) - - -class DummyInferenceModel: - - @property - def num_dependencies(self) -> int: - return 2 - - @on_device - def transform(self, param: torch.Tensor) -> torch.Tensor: - return param - - -def validate_device(tensor: torch.Tensor): - assert tensor.device == torch.device(get_accelerator().current_device()) diff --git a/tests/unit/inference/model_implementations/sharding/__init__.py b/tests/unit/inference/model_implementations/sharding/__init__.py deleted file mode 100644 index 208299fb8c50..000000000000 --- a/tests/unit/inference/model_implementations/sharding/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team diff --git a/tests/unit/inference/model_implementations/sharding/test_attn_out_sharding.py b/tests/unit/inference/model_implementations/sharding/test_attn_out_sharding.py deleted file mode 100644 index 850c4c24fde6..000000000000 --- a/tests/unit/inference/model_implementations/sharding/test_attn_out_sharding.py +++ /dev/null @@ -1,129 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.model_implementations.sharding import * - -# None of the logic should be dependent on head size. -HEAD_SIZE = 64 - - -def fill_with_head_ids(head_size: int, n_heads: int) -> torch.Tensor: - """ - Fills a tensor with the associated head ids. All columns should have the same value. - """ - head_ids = torch.arange(n_heads, dtype=torch.half, device=get_accelerator().current_device()) - - head_ids = head_ids.repeat_interleave(head_size).repeat(head_size * n_heads).reshape(n_heads * head_size, -1) - return head_ids - - -@pytest.mark.inference_v2 -@pytest.mark.parametrize("n_heads, n_shards", [(1, 1), (8, 4), (32, 8)]) -def test_mha_even_sharding(n_heads: int, n_shards: int): - """ - Even head sharding for MHA. - - Args: - n_heads (int): The number QKV heads. - n_shards (int): The number of shards to test for. - """ - param = fill_with_head_ids(HEAD_SIZE, n_heads) - - n_local_heads = n_heads // n_shards - sharded_shape = (HEAD_SIZE * n_heads, HEAD_SIZE * n_local_heads) - - for shard_rank in range(n_shards): - sharded_param = shard_attn_out_param(param, shard_rank, n_shards, HEAD_SIZE) - n_heads_local_q, _ = get_local_heads(shard_rank, n_shards, n_heads) - - assert sharded_param.shape[-1] == HEAD_SIZE * n_heads_local_q - assert sharded_param.shape == sharded_shape - - heads = torch.chunk(sharded_param, n_local_heads, dim=1) - - for i, head in enumerate(heads): - assert torch.all(head == i + shard_rank * n_local_heads) - - -@pytest.mark.inference_v2 -@pytest.mark.parametrize("n_heads, n_shards", [(3, 2), (20, 8)]) -def test_mha_unbalanced_sharding(n_heads: int, n_shards: int): - """ - Unbalanced head sharding for MHA. - - Args: - n_heads (int): The number QKV heads. - n_shards (int): The number of shards to test for. 
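Since ``n_heads`` is not evenly divisible by ``n_shards``, the checks below expect every
head to be assigned exactly once, with per-shard head counts differing by exactly one.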
- """ - param = fill_with_head_ids(HEAD_SIZE, n_heads) - - max_heads = 0 - min_heads = n_heads - seen_heads = set() - total_heads = 0 - - for shard_rank in range(n_shards): - sharded_param = shard_attn_out_param(param, shard_rank, n_shards, HEAD_SIZE) - n_heads_local_q, _ = get_local_heads(shard_rank, n_shards, n_heads) - - assert sharded_param.shape[-1] == HEAD_SIZE * n_heads_local_q - - n_local_heads = sharded_param.shape[1] // HEAD_SIZE - total_heads += n_local_heads - max_heads = max(max_heads, n_local_heads) - min_heads = min(min_heads, n_local_heads) - - for i in range(n_local_heads): - head_ids = torch.unique_consecutive(sharded_param[:, i * HEAD_SIZE:(i + 1) * HEAD_SIZE]) - assert len(head_ids) == 1 - seen_heads.add(head_ids.item()) - - assert max_heads == min_heads + 1 - assert total_heads == n_heads - assert len(seen_heads) == n_heads - - -@pytest.mark.inference_v2 -@pytest.mark.parametrize("n_heads_q, n_heads_kv, n_shards", [(20, 4, 8)]) -def test_gqa_uneven_sharding(n_heads_q: int, n_heads_kv: int, n_shards: int): - """ - We only test the uneven GQA test case because even GQA shards the attention output - in the exact same manner as MHA. - - Args: - n_heads_q (int): The number of query heads. - n_heads_kv (int): The number of key/value heads. - n_shards (int): The number of shards to test for. - """ - param = fill_with_head_ids(HEAD_SIZE, n_heads_q) - - min_heads = n_heads_q - max_heads = 0 - seen_heads = set() - total_heads = 0 - - for shard_rank in range(n_shards): - sharded_param = shard_attn_out_param(param, shard_rank, n_shards, HEAD_SIZE, n_heads_q, n_heads_kv) - n_heads_local_q, _ = get_local_heads(shard_rank, n_shards, n_heads_q, n_heads_kv) - - assert sharded_param.shape[-1] == HEAD_SIZE * n_heads_local_q - - n_local_heads = sharded_param.shape[1] // HEAD_SIZE - total_heads += n_local_heads - max_heads = max(max_heads, n_local_heads) - min_heads = min(min_heads, n_local_heads) - - for i in range(n_local_heads): - head_id = torch.unique_consecutive(sharded_param[:, i * HEAD_SIZE:(i + 1) * HEAD_SIZE]) - assert len(head_id) == 1 - seen_heads.add(head_id.item()) - - assert max_heads == min_heads + 1 - assert total_heads == n_heads_q - assert len(seen_heads) == n_heads_q diff --git a/tests/unit/inference/model_implementations/sharding/test_mlp_sharding.py b/tests/unit/inference/model_implementations/sharding/test_mlp_sharding.py deleted file mode 100644 index aac7e5391d8f..000000000000 --- a/tests/unit/inference/model_implementations/sharding/test_mlp_sharding.py +++ /dev/null @@ -1,116 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.model_implementations.sharding import * - - -def round_up_to_256(x: int) -> int: - """ - Round up to the nearest multiple of 256. 
- """ - return x + (256 - x % 256) - - -def make_params(model_dim: int, ffn_multiplier: int, n_experts: int, gated: bool = False) -> torch.Tensor: - """ - - """ - if gated: - mlp_1_intermediate = round_up_to_256(int(model_dim * ffn_multiplier * 4 / 3)) - mlp_2_intermediate = mlp_1_intermediate // 2 - else: - mlp_1_intermediate = ffn_multiplier * model_dim - mlp_2_intermediate = ffn_multiplier * model_dim - - mlp_1_shared_dim = torch.arange(mlp_1_intermediate, dtype=torch.float32, device=get_accelerator().current_device()) - - mlp_1_w = mlp_1_shared_dim.repeat_interleave(model_dim).reshape(mlp_1_intermediate, model_dim) - mlp_1_b = mlp_1_shared_dim - - mlp_2_shared_dim = torch.arange(mlp_2_intermediate, dtype=torch.float32, device=get_accelerator().current_device()) - mlp_2_w = mlp_2_shared_dim.repeat(model_dim).reshape(model_dim, mlp_2_intermediate) - mlp_2_b = torch.ones(model_dim, dtype=torch.float32, device=get_accelerator().current_device()) - - if n_experts > 1: - mlp_1_w = mlp_1_w.expand(n_experts, -1, -1) - mlp_1_b = mlp_1_b.expand(n_experts, -1) - mlp_2_w = mlp_2_w.expand(n_experts, -1, -1) - mlp_2_b = mlp_2_b.expand(n_experts, -1) - - return (mlp_1_w, mlp_1_b, mlp_2_w, mlp_2_b) - - -@pytest.mark.inference_v2 -@pytest.mark.parametrize("model_dim, ffn_multiplier, n_shards", [(1024, 4, 1), (1024, 4, 8), (1024, 4, 6)]) -@pytest.mark.parametrize("n_experts", [1, 16]) -def test_even_ffn_sharding(model_dim: int, ffn_multiplier: int, n_shards: int, n_experts: int): - """ - FFN sharding tends to be much simpler than attention sharding since it works on larger granularities. - While the test case of (1024, 4, 6) is not a use case we're likely to see, this does ensure that - the sharding logic will round correctly for the alignments we care about. - """ - mlp_1_w, mlp_1_b, mlp_2_w, mlp_2_b = make_params(model_dim, ffn_multiplier, n_experts) - - total_ffn_dim = model_dim * ffn_multiplier - mapped_neurons = 0 - - is_moe = n_experts > 1 - - for shard_rank in range(n_shards): - shard_1_w = shard_mlp_1_param(mlp_1_w, shard_rank, n_shards, is_moe=is_moe) - shard_1_b = shard_mlp_1_param(mlp_1_b, shard_rank, n_shards, is_moe=is_moe) - shard_2_w = shard_mlp_2_param(mlp_2_w, shard_rank, n_shards, is_moe=is_moe) - shard_2_b = shard_mlp_2_param(mlp_2_b, shard_rank, n_shards, is_moe=is_moe) - - assert shard_1_w.shape[-2] == shard_2_w.shape[-1] - assert shard_1_w.shape[-2] % DEFAULT_SHARD_GRANULARITY == 0 - assert shard_1_w.shape[-2] == shard_1_b.shape[-1] - - mapped_neurons += shard_1_w.shape[-2] - - if shard_rank != 0: - assert shard_2_b is None - else: - assert shard_2_b.shape[-1] == model_dim - - assert mapped_neurons == total_ffn_dim - - -@pytest.mark.inference_v2 -@pytest.mark.parametrize("model_dim, ffn_multiplier, n_shards", [(1024, 4, 1), (1024, 4, 8), (1024, 4, 6)]) -@pytest.mark.parametrize("n_experts", [1, 16]) -def test_gated_ffn_sharding(model_dim: int, ffn_multiplier: int, n_shards: int, n_experts: int): - """ - Test the same cases assuming a gated regime. 
- """ - mlp_1_w, mlp_1_b, mlp_2_w, mlp_2_b = make_params(model_dim, ffn_multiplier, n_experts, gated=True) - - total_ffn_dim = round_up_to_256(int(model_dim * ffn_multiplier * 4 / 3)) - mapped_neurons = 0 - - is_moe = n_experts > 1 - - for shard_rank in range(n_shards): - shard_1_w = shard_mlp_1_param(mlp_1_w, shard_rank, n_shards, gated=True, is_moe=is_moe) - shard_1_b = shard_mlp_1_param(mlp_1_b, shard_rank, n_shards, gated=True, is_moe=is_moe) - shard_2_w = shard_mlp_2_param(mlp_2_w, shard_rank, n_shards, is_moe=is_moe) - shard_2_b = shard_mlp_2_param(mlp_2_b, shard_rank, n_shards, is_moe=is_moe) - - assert shard_1_w.shape[-2] == shard_2_w.shape[-1] * 2 - assert shard_1_w.shape[-2] % DEFAULT_SHARD_GRANULARITY == 0 - assert shard_1_w.shape[-2] == shard_1_b.shape[-1] - - mapped_neurons += shard_1_w.shape[-2] - - if shard_rank != 0: - assert shard_2_b is None - else: - assert shard_2_b.shape[-1] == model_dim - - assert mapped_neurons == total_ffn_dim diff --git a/tests/unit/inference/model_implementations/sharding/test_qkv_sharding.py b/tests/unit/inference/model_implementations/sharding/test_qkv_sharding.py deleted file mode 100644 index 9a1cb9c09c64..000000000000 --- a/tests/unit/inference/model_implementations/sharding/test_qkv_sharding.py +++ /dev/null @@ -1,251 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -from typing import Optional - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.model_implementations.sharding import * - - -def fill_with_head_ids(head_size: int, n_heads_q: int, n_heads_kv: Optional[int] = None) -> torch.Tensor: - """ - - """ - head_ids_q = torch.arange(n_heads_q, dtype=torch.half, device=get_accelerator().current_device()) - head_vals_q = head_ids_q.repeat_interleave(head_size * head_size * n_heads_q).reshape(n_heads_q * head_size, -1) - - if n_heads_kv is None: - return torch.cat([head_vals_q, head_vals_q, head_vals_q], dim=0) - - head_ids_k = torch.arange(n_heads_kv, dtype=torch.half, device=get_accelerator().current_device()) - head_vals_k = head_ids_k.repeat_interleave(head_size * head_size * n_heads_q).reshape(n_heads_kv * head_size, -1) - - return torch.cat([head_vals_q, head_vals_k, head_vals_k], dim=0) - - -def validate_inferred_shape(shard: torch.Tensor, head_size: int, n_local_q_heads: int, n_local_kv_heads: int): - """ - Validate that the leading dim of the shard is of the expected size and aligns with the sharding - logic for the attention computation itself. - """ - inferred_leading_dim = head_size * (n_local_q_heads + 2 * n_local_kv_heads) - assert shard.shape[0] == inferred_leading_dim - - -@pytest.mark.inference_v2 -@pytest.mark.parametrize("head_size", [64]) -@pytest.mark.parametrize("n_heads,n_shards", [(1, 1), (32, 1), (32, 8)]) -def test_even_mha_sharding(head_size: int, n_heads: int, n_shards: int): - """ - Test for MHA sharding. In these scenarios, we expect that each of the shards - should be the same size. 
- """ - param = fill_with_head_ids(head_size, n_heads) - - heads_per_shard = n_heads // n_shards - - for shard_rank in range(n_shards): - - shard = shard_qkv_param(param, shard_rank, n_shards, head_size, n_heads, n_heads) - n_local_q_heads, n_local_kv_heads = get_local_heads(shard_rank, n_shards, n_heads, n_heads) - validate_inferred_shape(shard, head_size, n_local_q_heads, n_local_kv_heads) - - assert shard.shape == (3 * head_size * heads_per_shard, head_size * n_heads) - - heads = shard.chunk(heads_per_shard * 3, dim=0) - for i in range(heads_per_shard): - assert torch.all(heads[i] == i + shard_rank * heads_per_shard) - assert torch.all(heads[i + heads_per_shard] == i + shard_rank * heads_per_shard) - assert torch.all(heads[i + heads_per_shard * 2] == i + shard_rank * heads_per_shard) - - -@pytest.mark.inference_v2 -@pytest.mark.parametrize("head_size", [64]) -@pytest.mark.parametrize("n_heads, n_shards", [(3, 2), (20, 8)]) -def test_unbalanced_mha_sharding(head_size: int, n_heads: int, n_shards: int): - """ - Test MHA sharding when the distribution of heads will not be equal across all ranks. - """ - param = fill_with_head_ids(head_size, n_heads) - - max_heads = 0 - min_heads = n_heads - total_heads = 0 - seen_heads = set() - - for shard_rank in range(n_shards): - shard = shard_qkv_param(param, shard_rank, n_shards, head_size, n_heads, n_heads) - n_local_q_heads, n_local_kv_heads = get_local_heads(shard_rank, n_shards, n_heads, n_heads) - validate_inferred_shape(shard, head_size, n_local_q_heads, n_local_kv_heads) - - n_heads_in_shard = shard.shape[0] // head_size // 3 - - max_heads = max(max_heads, n_heads_in_shard) - min_heads = min(min_heads, n_heads_in_shard) - total_heads += n_heads_in_shard - - heads = shard.chunk(n_heads_in_shard * 3, dim=0) - - for local_head_id in range(n_heads_in_shard): - head_qkv = torch.cat([ - heads[local_head_id], heads[local_head_id + n_heads_in_shard], - heads[local_head_id + 2 * n_heads_in_shard] - ], - dim=0) - assert head_qkv.shape == (3 * head_size, head_size * n_heads) - - global_head_id = torch.unique_consecutive(head_qkv) - assert len(global_head_id) == 1 - - seen_heads.add(global_head_id.item()) - - assert max_heads - min_heads <= 1 - assert total_heads == n_heads - assert len(seen_heads) == n_heads - - -@pytest.mark.inference_v2 -@pytest.mark.parametrize("head_size", [64]) -@pytest.mark.parametrize("n_heads_q, n_heads_kv, n_shards", [(4, 2, 1), (8, 2, 1), (64, 16, 8)]) -def test_gqa_even_sharding(head_size: int, n_heads_q: int, n_heads_kv: int, n_shards: int): - """ - Test GQA sharding when the KV heads are evenly divisible by the number of shards. 
- """ - param = fill_with_head_ids(head_size, n_heads_q, n_heads_kv) - - n_kv_heads_in_shard = n_heads_kv // n_shards - n_q_heads_in_shard = n_heads_q // n_shards - - for shard_rank in range(n_shards): - shard = shard_qkv_param(param, shard_rank, n_shards, head_size, n_heads_q, n_heads_kv) - n_local_q_heads, n_local_kv_heads = get_local_heads(shard_rank, n_shards, n_heads_q, n_heads_kv) - validate_inferred_shape(shard, head_size, n_local_q_heads, n_local_kv_heads) - - assert shard.shape[0] == (n_q_heads_in_shard + n_kv_heads_in_shard * 2) * head_size - - q = shard[:n_q_heads_in_shard * head_size] - k = shard[n_q_heads_in_shard * head_size:(n_q_heads_in_shard + n_kv_heads_in_shard) * head_size] - v = shard[(n_q_heads_in_shard + n_kv_heads_in_shard) * head_size:] - - for local_head_id in range(n_q_heads_in_shard): - assert torch.all(q[local_head_id * head_size:(local_head_id + 1) * head_size] == local_head_id + - shard_rank * n_q_heads_in_shard) - - for local_head_id in range(n_kv_heads_in_shard): - assert torch.all(k[local_head_id * head_size:(local_head_id + 1) * head_size] == local_head_id + - shard_rank * n_kv_heads_in_shard) - assert torch.all(v[local_head_id * head_size:(local_head_id + 1) * head_size] == local_head_id + - shard_rank * n_kv_heads_in_shard) - - -@pytest.mark.inference_v2 -@pytest.mark.parametrize("head_size", [64]) -@pytest.mark.parametrize("n_heads_q, n_heads_kv, n_shards", [(4, 2, 4), (20, 4, 8)]) -def test_gqa_uneven_sharding(head_size: int, n_heads_q: int, n_heads_kv: int, n_shards: int): - """ - Test GQA sharding when there are more shards than KV heads. - """ - param = fill_with_head_ids(head_size, n_heads_q, n_heads_kv) - - n_kv_heads_in_shard = 1 - n_shards_per_kv_head = n_shards // n_heads_kv - - max_heads = 0 - min_heads = n_heads_q - total_heads = 0 - seen_heads = set() - - for shard_rank in range(n_shards): - shard = shard_qkv_param(param, shard_rank, n_shards, head_size, n_heads_q, n_heads_kv) - n_local_q_heads, n_local_kv_heads = get_local_heads(shard_rank, n_shards, n_heads_q, n_heads_kv) - validate_inferred_shape(shard, head_size, n_local_q_heads, n_local_kv_heads) - - local_n_heads_q = (shard.shape[0] - 2 * n_kv_heads_in_shard * head_size) // head_size - - max_heads = max(max_heads, local_n_heads_q) - min_heads = min(min_heads, local_n_heads_q) - total_heads += local_n_heads_q - - q = shard[:local_n_heads_q * head_size] - kv = shard[local_n_heads_q * head_size:] - - for local_head_id in range(local_n_heads_q): - q_head_id = torch.unique_consecutive(q[local_head_id * head_size:(local_head_id + 1) * head_size]) - assert len(q_head_id) == 1 - - seen_heads.add(q_head_id.item()) - - kv_id_calc = shard_rank // n_shards_per_kv_head - kv_id = torch.unique_consecutive(kv) - assert len(kv_id) == 1 - assert kv_id.item() == kv_id_calc - - assert max_heads - min_heads <= 1 - assert total_heads == n_heads_q - assert len(seen_heads) == n_heads_q - - -@pytest.mark.inference_v2 -@pytest.mark.parametrize("head_size", [64]) -@pytest.mark.parametrize("n_heads, n_shards", [(6, 8)]) -def test_unsupported_mha_configs(head_size: int, n_heads: int, n_shards: int): - """ - Sharding should fail if there are fewer heads than shards. - - TODO(cmikeh2): Look to support this configuration. 
-    """
-    param = fill_with_head_ids(head_size, n_heads)
-
-    for shard_rank in range(n_shards):
-        with pytest.raises(ValueError):
-            shard_qkv_param(param, shard_rank, n_shards, head_size, n_heads, n_heads)
-
-
-@pytest.mark.inference_v2
-@pytest.mark.parametrize("head_size", [64])
-@pytest.mark.parametrize("n_heads_q, n_heads_kv, n_shards", [(5, 2, 1), (40, 10, 8), (30, 5, 8)])
-def test_unsupported_gqa_configs(head_size: int, n_heads_q: int, n_heads_kv: int, n_shards: int):
-    """
-    GQA has stricter requirements. We must be able to evenly shard or distribute the KV heads.
-
-    Test cases are to test the following preconditions specifically:
-        1. n_heads_q % n_heads_kv == 0
-        2. We must be able to evenly distribute KV heads
-        3. We must be able to evenly split KV heads
-    """
-    param = fill_with_head_ids(head_size, n_heads_q, n_heads_kv)
-
-    for shard_rank in range(n_shards):
-        with pytest.raises(ValueError):
-            shard_qkv_param(param, shard_rank, n_shards, head_size, n_heads_q, n_heads_kv)
-
-
-@pytest.mark.inference_v2
-def test_mha_input_shape_error():
-
-    param = torch.empty(256, 128)
-
-    n_heads = 2
-    head_size = 64
-
-    with pytest.raises(ValueError):
-        shard_qkv_param(param, 0, 1, 64)
-
-
-@pytest.mark.inference_v2
-def test_gqa_input_shape_error():
-
-    head_size = 64
-    n_heads_q = 16
-    n_heads_kv = 4
-
-    # Correct shape is 1536 (=16 * 64 + 2 * 4 * 64), 1024
-    param = torch.empty(2048, 1024)
-
-    with pytest.raises(ValueError):
-        shard_qkv_param(param, 0, 1, head_size, n_heads_q, n_heads_kv)
diff --git a/tests/unit/inference/modules/__init__.py b/tests/unit/inference/modules/__init__.py
deleted file mode 100644
index 208299fb8c50..000000000000
--- a/tests/unit/inference/modules/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# Copyright (c) Microsoft Corporation.
-# SPDX-License-Identifier: Apache-2.0
-
-# DeepSpeed Team
diff --git a/tests/unit/inference/modules/test_blas_linear_module.py b/tests/unit/inference/modules/test_blas_linear_module.py
deleted file mode 100644
index 18b546bab6bd..000000000000
--- a/tests/unit/inference/modules/test_blas_linear_module.py
+++ /dev/null
@@ -1,111 +0,0 @@
-# Copyright (c) Microsoft Corporation.
-# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -from typing import Optional - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.inference_utils import ActivationType, DtypeEnum, is_gated -from deepspeed.inference.v2.modules import ConfigBundle -from deepspeed.inference.v2.modules.configs import DSLinearConfig -from deepspeed.inference.v2.modules.interfaces import DSLinearRegistry -from ..inference_test_utils import allclose - - -def reference_implementation(hidden_states: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor], - act_type: ActivationType) -> torch.Tensor: - dtype = hidden_states.dtype - out_states = torch.nn.functional.linear(hidden_states, weight, bias) - out_states.float() - - if is_gated(act_type): - act_func_map = { - ActivationType.ReGLU: torch.nn.functional.relu, - ActivationType.GEGLU: lambda x: torch.nn.functional.gelu(x, approximate="tanh"), - ActivationType.SiGLU: torch.nn.functional.silu, - } - - act_act = out_states[..., ::2] - act_linear = out_states[..., 1::2] - - act_act = act_func_map[act_type](act_act) - out_states = act_act * act_linear - else: - act_func_map = { - ActivationType.RELU: torch.nn.functional.relu, - ActivationType.GELU: torch.nn.functional.gelu, - ActivationType.SILU: torch.nn.functional.silu, - ActivationType.IDENTITY: lambda x: x, - } - - out_states = act_func_map[act_type](out_states) - return out_states.to(dtype) - - -def _blas_linear_helper(tokens: int, - in_channels: int, - out_channels: int, - dtype: DtypeEnum, - act_fn: ActivationType, - use_bias: bool = True) -> None: - linear_config = DSLinearConfig(max_tokens=2048, - in_channels=in_channels, - out_channels=out_channels, - activation=act_fn, - input_dtype=dtype, - output_dtype=dtype) - - bundle = ConfigBundle(name='blas_fp_linear', config=linear_config) - - module = DSLinearRegistry.instantiate_config(bundle) - - # Input vals - hidden_states = torch.randn( - (tokens, in_channels), dtype=dtype.value, device=get_accelerator().current_device_name()) * .01 - - weight_out_channels = 2 * out_channels if is_gated(act_fn) else out_channels - weight = torch.randn( - (weight_out_channels, in_channels), dtype=dtype.value, device=get_accelerator().current_device_name()) * .01 - if use_bias: - bias = torch.randn( - (weight_out_channels), dtype=dtype.value, device=get_accelerator().current_device_name()) * .01 - else: - bias = None - - # Reference output - ref_output = reference_implementation(hidden_states, weight, bias, act_fn) - - # New output - ds_output = module(hidden_states, weight, bias) - - # Check - assert allclose(ds_output, ref_output) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("tokens, in_channels, out_channels", [(1, 4608, 1728), (37, 8192, 4096), (1280, 3072, 6144)]) -def test_blas_linear_shapes(tokens: int, in_channels: int, out_channels: int) -> None: - - _blas_linear_helper(tokens, in_channels, out_channels, DtypeEnum.fp16, ActivationType.IDENTITY) - - -all_acts = [ - ActivationType.RELU, - ActivationType.GELU, - ActivationType.SILU, - ActivationType.GEGLU, - ActivationType.ReGLU, - ActivationType.SiGLU, -] - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("act_fn", all_acts) -@pytest.mark.parametrize("use_bias", [True, False]) -def test_blas_linear_act_fn(act_fn: ActivationType, use_bias: bool) -> None: - - _blas_linear_helper(283, 512, 4096, DtypeEnum.fp16, act_fn, use_bias=use_bias) diff --git a/tests/unit/inference/modules/test_blocked_attn.py 
b/tests/unit/inference/modules/test_blocked_attn.py deleted file mode 100644 index 1f03b46bd002..000000000000 --- a/tests/unit/inference/modules/test_blocked_attn.py +++ /dev/null @@ -1,210 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import itertools - -from typing import List, Tuple - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.modules import ConfigBundle -from deepspeed.inference.v2.modules.configs import DSSelfAttentionConfig, PositionalEmbeddingType -from deepspeed.inference.v2.modules.interfaces import DSSelfAttentionRegistry, DSSelfAttentionBase - -from ..kernels.ragged_ops.ragged_testing_utils import build_batch_and_manager -from ..inference_test_utils import allclose - -try: - from flash_attn.flash_attn_interface import flash_attn_varlen_func - validate_accuracy = True -except ImportError: - validate_accuracy = False - - -def _blocked_flash_testing_helper(head_size: int, - n_heads_q: int, - n_heads_kv: int, - seq_params: List[Tuple[int, int]], - trained_freqs: bool = None) -> None: - """ - Helper function for testing blocked flash attention. This implementation is based on - the implemnentation in ``unit.inference.kernels.ragged_ops.test_blocked_flash`` but - integrates functionality to validate the composability. - """ - if trained_freqs is None: - embed_type = PositionalEmbeddingType.none - embed_args = {} - else: - embed_type = PositionalEmbeddingType.rotate_half - if trained_freqs: - embed_args = {'trained_freqs': True} - else: - embed_args = {'trained_freqs': False} - - attn_config = DSSelfAttentionConfig(max_tokens=2048, - n_heads_q=n_heads_q, - n_heads_kv=n_heads_kv, - head_size=head_size, - max_sequences=32, - positional_embedding_type=embed_type, - positional_embedding_args=embed_args) - - config = ConfigBundle(name='dense_blocked_attention', config=attn_config) - attn_module: DSSelfAttentionBase = DSSelfAttentionRegistry.instantiate_config(config) - - kv_block_size = attn_module.kv_block_size - - kvs = [] - for _, history_len in seq_params: - if history_len > 0: - kvs.append( - torch.randn((history_len, 2 * n_heads_kv * head_size), - device=get_accelerator().current_device(), - dtype=torch.float16)) - else: - kvs.append(None) - - batch, state_manager, _ = build_batch_and_manager(seq_params, head_size, n_heads_kv, kv_block_size, kv_fill=kvs) - - qkv = torch.randn((batch.current_tokens, (n_heads_q + 2 * n_heads_kv) * head_size), - device=get_accelerator().current_device(), - dtype=torch.float16) - - kv_cache = state_manager.get_cache(0) - - attn_module.build_atoms(batch) - if not trained_freqs: - out = attn_module(qkv, kv_cache, batch) - else: - inv_freqs = torch.randn((head_size // 2, ), device=get_accelerator().current_device(), dtype=torch.float16) - out = attn_module(qkv, kv_cache, batch, inv_freqs) - - if validate_accuracy and trained_freqs is None: - cu_seqlens_q = torch.tensor([0] + list(itertools.accumulate([seq[0] for seq in seq_params])), - dtype=torch.int32, - device=get_accelerator().current_device()) - cu_seqlens_kv = torch.tensor([0] + list(itertools.accumulate([seq[1] + seq[0] for seq in seq_params])), - dtype=torch.int32, - device=get_accelerator().current_device()) - - inflight_kv = qkv[:, head_size * n_heads_q:] - full_kvs = [] - for i, kv in enumerate(kvs): - if kv is not None: - full_kvs.append(torch.cat([kv, inflight_kv[cu_seqlens_q[i]:cu_seqlens_q[i + 1]]], dim=0)) - else: - 
full_kvs.append(inflight_kv[cu_seqlens_q[i]:cu_seqlens_q[i + 1]]) - run_kvs = torch.cat(full_kvs, dim=0) - k = run_kvs[:, :head_size * n_heads_kv] - v = run_kvs[:, head_size * n_heads_kv:] - - q = qkv[:, :head_size * n_heads_q] - q_ref = q.reshape((batch.current_tokens, n_heads_q, head_size)) - k_ref = k.reshape((k.shape[0], n_heads_kv, head_size)) - v_ref = v.reshape((v.shape[0], n_heads_kv, head_size)) - - max_seqlen_q = max([seq[0] for seq in seq_params]) - max_seqlen_kv = max([seq[1] + seq[0] for seq in seq_params]) - - ref_o = flash_attn_varlen_func(q_ref, - k_ref, - v_ref, - cu_seqlens_q, - cu_seqlens_kv, - max_seqlen_q, - max_seqlen_kv, - softmax_scale=1.0, - causal=True) - - ref_o = ref_o.reshape(batch.current_tokens, head_size * n_heads_q) - - assert allclose(out, ref_o) - - get_accelerator().synchronize() - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("n_tokens", [2, 33, 65, 128, 256, 2037]) -def test_single_prompt(n_tokens: int) -> None: - head_size = 64 - n_heads_q = 16 - n_heads_kv = 16 - - seq_params = [(n_tokens, 0)] - _blocked_flash_testing_helper(head_size, n_heads_q, n_heads_kv, seq_params) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("prompt_lengths", [(128, 128), (192, 38), (514, 713), (83, 312, 610)]) -def test_multiple_prompts(prompt_lengths: Tuple[int, int]) -> None: - """ - Test multiple prompts in a single batch. - """ - head_size = 64 - n_heads_q = 16 - n_heads_kv = 16 - - seq_params = [(prompt_lengths[i], 0) for i in range(len(prompt_lengths))] - _blocked_flash_testing_helper(head_size, n_heads_q, n_heads_kv, seq_params) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("seq_params", [(1, 34), (43, 40), (1, 144), (64, 128), (332, 628)]) -def test_continuation(seq_params: Tuple[int, int]) -> None: - """ - Test continued generation/prompt processing. 
- """ - head_size = 64 - n_heads_q = 32 - n_heads_kv = 32 - - _blocked_flash_testing_helper(head_size, n_heads_q, n_heads_kv, [seq_params]) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("head_size", [64, 128]) -def test_head_size(head_size: int) -> None: - n_heads_q = 16 - n_heads_kv = 16 - seq_params = [(128, 128), (192, 38), (1, 814)] - - _blocked_flash_testing_helper(head_size, n_heads_q, n_heads_kv, seq_params) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("head_config", [(32, 8), (64, 16), (40, 8)]) -def test_gqa(head_config: Tuple[int, int]) -> None: - head_size = 128 - n_heads_q = head_config[0] - n_heads_kv = head_config[1] - - seq_params = [(128, 128), (192, 38), (1, 814)] - - _blocked_flash_testing_helper(head_size, n_heads_q, n_heads_kv, seq_params) - - -@pytest.mark.inference_v2_ops -def test_fully_composed() -> None: - head_size = 64 - n_heads_q = 16 - n_heads_kv = 16 - - seq_params = [(332, 628), (1, 718), (1, 323), (180, 5), (224, 0)] - - _blocked_flash_testing_helper(head_size, n_heads_q, n_heads_kv, seq_params) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("trained_freqs", [True, False]) -def test_rotary_emb(trained_freqs: bool) -> None: - head_size = 64 - n_heads_q = 16 - n_heads_kv = 16 - - seq_params = [(332, 628), (1, 718), (1, 323), (180, 5), (224, 0)] - - _blocked_flash_testing_helper(head_size, n_heads_q, n_heads_kv, seq_params, trained_freqs=trained_freqs) diff --git a/tests/unit/inference/modules/test_cuda_pre_ln_module.py b/tests/unit/inference/modules/test_cuda_pre_ln_module.py deleted file mode 100644 index d6c42a3e1336..000000000000 --- a/tests/unit/inference/modules/test_cuda_pre_ln_module.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -from typing import Optional, Tuple - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.modules import ConfigBundle -from deepspeed.inference.v2.modules.configs import DSNormConfig -from deepspeed.inference.v2.modules.interfaces import DSPreNormRegistry -from ..inference_test_utils import get_dtypes, allclose - - -def reference_implementation(residual: torch.Tensor, hidden_states: Optional[torch.Tensor], gamma: torch.Tensor, - beta: torch.Tensor, epsilon: float) -> Tuple[torch.Tensor, torch.Tensor]: - dtype = residual.dtype - - residual = residual.to(torch.float32) - gamma = gamma.to(torch.float32) - beta = beta.to(torch.float32) - - if hidden_states is not None: - hidden_states = hidden_states.to(torch.float32) - residual = residual + hidden_states - hidden_states = torch.nn.functional.layer_norm(residual, (residual.size(-1), ), - weight=gamma, - bias=beta, - eps=epsilon) - return residual.to(dtype), hidden_states.to(dtype) - - -def _pre_ln_test_helper(n_tokens: int, n_channels: int, dtype: torch.dtype, res_add: bool = False): - config = DSNormConfig(max_tokens=2048, - type="layer_norm", - channels=n_channels, - residual_dtype=dtype, - input_dtype=dtype, - output_dtype=dtype, - eps=1e-5) - bundle = ConfigBundle(name='cuda_pre_ln', config=config) - - # Input vals - if res_add: - hidden_states = torch.randn((n_tokens, n_channels), - dtype=dtype, - device=get_accelerator().current_device_name()) - else: - hidden_states = None - - residual = torch.randn((n_tokens, n_channels), dtype=dtype, device=get_accelerator().current_device_name()) - gamma = torch.randn((n_channels), dtype=torch.float32, device=get_accelerator().current_device_name()) 
- beta = torch.rand((n_channels), dtype=torch.float32, device=get_accelerator().current_device_name()) - epsilon = 1e-5 - - # Reference output - ref_residual, ref_output = reference_implementation(residual, hidden_states, gamma, beta, epsilon) - - # New output - pre_ln_module = DSPreNormRegistry.instantiate_config(bundle) - gamma = pre_ln_module.transform_param(gamma) - beta = pre_ln_module.transform_param(beta) - - ds_residual, ds_output = pre_ln_module(residual, hidden_states, gamma, beta) - - # Check - assert allclose(ds_residual, ref_residual) - assert allclose(ds_output, ref_output) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("tokens, channels", [(1, 2048), (37, 8192), (1280, 768), (2048, 5120)]) -def test_token_channels(tokens: int, channels: int) -> None: - _pre_ln_test_helper(tokens, channels, torch.float16) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("dtype", get_dtypes(include_float=False)) -def test_dtype(dtype: torch.dtype) -> None: - _pre_ln_test_helper(733, 2560, dtype) - - -@pytest.mark.inference_v2_ops -def test_no_res_add(): - _pre_ln_test_helper(733, 2560, torch.float16, res_add=False) diff --git a/tests/unit/inference/modules/test_custom_module.py b/tests/unit/inference/modules/test_custom_module.py deleted file mode 100644 index e14ccd3f2244..000000000000 --- a/tests/unit/inference/modules/test_custom_module.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.modules import ConfigBundle -from deepspeed.inference.v2.modules.interfaces import DSPostNormRegistry -from deepspeed.inference.v2.modules.configs import DSNormConfig -from deepspeed.inference.v2.modules.implementations import cuda_post_ln -from ..inference_test_utils import allclose - - -def reference_implementation(residual: torch.Tensor, hidden_states: torch.Tensor, gamma: torch.Tensor, - beta: torch.Tensor, epsilon: float) -> torch.Tensor: - residual_f = residual.to(torch.float32) - hidden_states_f = hidden_states.to(torch.float32) - gamma_f = gamma.to(torch.float32) - beta_f = beta.to(torch.float32) - return torch.nn.functional.layer_norm(residual_f + hidden_states_f, (hidden_states_f.size(-1), ), - weight=gamma_f, - bias=beta_f, - eps=epsilon).to(hidden_states.dtype) - - -@DSPostNormRegistry.register_module -class CustomPostLNModule(cuda_post_ln.DSPostLNCUDAModule): - - @staticmethod - def name(): - return 'custom_post_ln' - - -""" -Here, we explicitly register an LN implementation outside the core deepspeed repo. This should -validate that the registry is working as expected and we can implement modules outside the core -repo. 
-""" - - -@pytest.mark.inference_v2_ops -def test_custom_registration(): - channels = 4096 - dtype = torch.float16 - tokens = 1024 - - config = DSNormConfig(max_tokens=2048, - type="layer_norm", - channels=channels, - residual_dtype=dtype, - input_dtype=dtype, - output_dtype=dtype, - eps=1e-5) - bundle = ConfigBundle(name='custom_post_ln', config=config) - - # Input vals - hidden_states = torch.randn((tokens, channels), dtype=dtype, device=get_accelerator().current_device_name()) - residual = torch.randn((tokens, channels), dtype=dtype, device=get_accelerator().current_device_name()) - gamma = torch.randn((channels), dtype=torch.float32, device=get_accelerator().current_device_name()) - beta = torch.rand((channels), dtype=torch.float32, device=get_accelerator().current_device_name()) - epsilon = 1e-5 - - # Reference output - ref_output = reference_implementation(residual, hidden_states, gamma, beta, epsilon) - - # New output - post_ln_module = DSPostNormRegistry.instantiate_config(bundle) - gamma = post_ln_module.transform_param(gamma) - beta = post_ln_module.transform_param(beta) - ds_output, _ = post_ln_module(residual, hidden_states, gamma, beta) - - # Check - assert allclose(ds_output, ref_output) diff --git a/tests/unit/inference/modules/test_cutlass_moe.py b/tests/unit/inference/modules/test_cutlass_moe.py deleted file mode 100644 index 98a48b5b149d..000000000000 --- a/tests/unit/inference/modules/test_cutlass_moe.py +++ /dev/null @@ -1,214 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -from typing import Tuple - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.inference_utils import ActivationType, DtypeEnum -from deepspeed.inference.v2.modules import ConfigBundle -from deepspeed.inference.v2.modules.configs import DSMoEConfig -from deepspeed.inference.v2.modules.interfaces import DSMoERegistry - -from ..kernels.ragged_ops.ragged_testing_utils import build_simple_batch -from ..inference_test_utils import allclose, get_dtypes - - -def _gating_reference(logits: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Reference gating code. - """ - logits = logits.float() - probs = torch.nn.functional.softmax(logits, dim=1) - - indices1_s = torch.argmax(probs, dim=-1) - mask1 = torch.nn.functional.one_hot(indices1_s, num_classes=logits.shape[-1]) - indices_mask = mask1.sum(dim=1) * logits.shape[-1] - 1 - indices1_s = torch.min(indices1_s, indices_mask) - - gates1_s = (probs * mask1).sum(dim=1) - - sorted_indices = indices1_s.sort()[1] - original_indices = sorted_indices.sort()[1] - - exp_count = torch.bincount(indices1_s, minlength=logits.shape[-1]).long() - exp_count_cumsum = exp_count.cumsum(dim=0) - - return sorted_indices, original_indices, exp_count_cumsum, gates1_s - - -def _reference_impl(hidden_states: torch.Tensor, gate_weight: torch.Tensor, mlp_1_w: torch.Tensor, - mlp_2_w: torch.Tensor, mlp_1_b: torch.Tensor, mlp_2_b: torch.Tensor, - act_fn: ActivationType) -> torch.Tensor: - """ - Reference implementation of the MoE module. 
- """ - - act_fn_dict = { - ActivationType.GELU: torch.nn.functional.gelu, - ActivationType.RELU: torch.nn.functional.relu, - ActivationType.SILU: torch.nn.functional.silu, - ActivationType.IDENTITY: lambda x: x, - } - - logits = torch.matmul(hidden_states, gate_weight.t()) - sorted_indices, original_indices, exp_count_cumsum, gate_scales = _gating_reference(logits) - - moe_input = hidden_states[sorted_indices] - - output_unordered = torch.empty_like(hidden_states) - - for expert_idx in range(mlp_1_w.shape[0]): - min_bound = 0 if expert_idx == 0 else exp_count_cumsum[expert_idx - 1] - max_bound = exp_count_cumsum[expert_idx] - - input_slice = moe_input[min_bound:max_bound] - intermediate = torch.nn.functional.linear(input_slice, mlp_1_w[expert_idx], mlp_1_b[expert_idx]) - - intermediate = act_fn_dict[act_fn](intermediate) - output_slice = torch.nn.functional.linear(intermediate, mlp_2_w[expert_idx], mlp_2_b[expert_idx]) - - output_unordered[min_bound:max_bound] = output_slice - - output = output_unordered[original_indices] - - output.mul_(gate_scales.unsqueeze(-1)).reshape(hidden_states.shape) - return output - - -def _cutlass_moe_testing_helper(tokens: int, - in_channels: int, - intermediate_dim: int, - experts: int, - dtype: int, - activation_type: ActivationType = ActivationType.GELU, - use_bias: bool = True, - iters: int = 1) -> None: - - config = DSMoEConfig(max_tokens=4096, - model_dim=in_channels, - intermediate_features=intermediate_dim, - n_experts=experts, - activation=activation_type, - input_dtype=dtype, - output_dtype=dtype) - - implementation_config = {"weight_dtype": DtypeEnum(dtype)} - - bundle = ConfigBundle(name='cutlass_multi_gemm_moe', config=config, implementation_config=implementation_config) - moe_module = DSMoERegistry.instantiate_config(bundle) - - batch = build_simple_batch([tokens]) - - # Parameters - gate_weight = torch.randn( - (experts, in_channels), dtype=dtype.value, device=get_accelerator().current_device()) * .1 - - mlp_1_w = torch.randn( - (experts, intermediate_dim, in_channels), dtype=dtype.value, device=get_accelerator().current_device()) * .1 - mlp_2_w = torch.randn( - (experts, in_channels, intermediate_dim), dtype=dtype.value, device=get_accelerator().current_device()) * .1 - - if use_bias: - mlp_1_b = torch.randn( - (experts, intermediate_dim), dtype=dtype.value, device=get_accelerator().current_device()) * .1 - mlp_2_b = torch.randn( - (experts, in_channels), dtype=dtype.value, device=get_accelerator().current_device()) * .1 - else: - mlp_1_b = None - mlp_2_b = None - - gate_ds = moe_module.transform_gate_param(gate_weight) - mlp_1_w_ds = moe_module.transform_moe_mlp_1_param(mlp_1_w) - mlp_1_b_ds = moe_module.transform_moe_mlp_1_param(mlp_1_b) - mlp_2_w_ds = moe_module.transform_moe_mlp_2_param(mlp_2_w) - mlp_2_b_ds = moe_module.transform_moe_mlp_2_param(mlp_2_b) - - for _ in range(iters): - # Input vals - hidden_states = torch.randn( - (tokens, in_channels), dtype=dtype.value, device=get_accelerator().current_device()) * .1 - - # Reference implementation - ref_output = _reference_impl(hidden_states, gate_weight, mlp_1_w, mlp_2_w, mlp_1_b, mlp_2_b, activation_type) - - output = moe_module(hidden_states, - batch, - gate_ds, - mlp_1_w_ds, - mlp_2_w_ds, - mlp_1_b=mlp_1_b_ds, - mlp_2_b=mlp_2_b_ds) - - # Increase the tolerance for larger meta ops since the error is additive - assert allclose(output, ref_output, tolerances=(1e-2, 1e-2)) - - get_accelerator().synchronize() - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("experts", [2, 32, 
64]) -def test_expert_variance(experts: int) -> None: - _cutlass_moe_testing_helper(tokens=876, - in_channels=4096, - intermediate_dim=2048, - experts=experts, - dtype=DtypeEnum.fp16, - activation_type=ActivationType.IDENTITY, - use_bias=True) - - -@pytest.mark.inference_v2_ops -def test_successive_inputs(): - """ - The CUTLASS MoE uses persistent state (expert counts) that is assumed to be cleared - on each forward pass. This ensures that the module is clearing that metadata. - """ - _cutlass_moe_testing_helper(tokens=876, - in_channels=4096, - intermediate_dim=2048, - experts=64, - dtype=DtypeEnum.fp16, - activation_type=ActivationType.IDENTITY, - use_bias=True, - iters=10) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("dtype", get_dtypes(include_float=False)) -def test_dtypes(dtype: torch.dtype) -> None: - _cutlass_moe_testing_helper(tokens=876, - in_channels=4096, - intermediate_dim=2048, - experts=64, - dtype=DtypeEnum(dtype), - activation_type=ActivationType.IDENTITY, - use_bias=True) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("activation_type", [ActivationType.GELU, ActivationType.RELU, ActivationType.SILU]) -def test_activation_types(activation_type: ActivationType) -> None: - _cutlass_moe_testing_helper(tokens=876, - in_channels=4096, - intermediate_dim=2048, - experts=64, - dtype=DtypeEnum.fp16, - activation_type=activation_type, - use_bias=True) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("in_channels, out_channels", [(4096, 2048), (2048, 8192), (6144, 3072)]) -def test_in_out_channels(in_channels: int, out_channels: int) -> None: - _cutlass_moe_testing_helper(tokens=876, - in_channels=in_channels, - intermediate_dim=out_channels, - experts=64, - dtype=DtypeEnum.fp16, - activation_type=ActivationType.IDENTITY, - use_bias=True) diff --git a/tests/unit/inference/modules/test_post_ln_module.py b/tests/unit/inference/modules/test_post_ln_module.py deleted file mode 100644 index 238d8fa4d1b1..000000000000 --- a/tests/unit/inference/modules/test_post_ln_module.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) Microsoft Corporation. 
-# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.modules import ConfigBundle -from deepspeed.inference.v2.modules.configs import DSNormConfig -from deepspeed.inference.v2.modules.interfaces import DSPostNormRegistry -from ..inference_test_utils import get_dtypes, allclose - - -def reference_implementation(residual: torch.Tensor, hidden_states: torch.Tensor, gamma: torch.Tensor, - beta: torch.Tensor, epsilon: float) -> torch.Tensor: - residual_f = residual.to(torch.float32) - hidden_states_f = hidden_states.to(torch.float32) - gamma_f = gamma.to(torch.float32) - beta_f = beta.to(torch.float32) - return torch.nn.functional.layer_norm(residual_f + hidden_states_f, (hidden_states_f.size(-1), ), - weight=gamma_f, - bias=beta_f, - eps=epsilon).to(hidden_states.dtype) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("tokens, channels", [(1, 2048), (37, 8192), (1280, 768), (2048, 5120)]) -@pytest.mark.parametrize("dtype", get_dtypes()) -def test_cuda_post_ln_module(tokens: int, channels: int, dtype: torch.dtype) -> None: - config = DSNormConfig(max_tokens=2048, - type="layer_norm", - channels=channels, - residual_dtype=dtype, - input_dtype=dtype, - output_dtype=dtype, - eps=1e-5) - bundle = ConfigBundle(name='cuda_post_ln', config=config) - - # Input vals - hidden_states = torch.randn((tokens, channels), dtype=dtype, device=get_accelerator().current_device_name()) - residual = torch.randn((tokens, channels), dtype=dtype, device=get_accelerator().current_device_name()) - gamma = torch.randn((channels), dtype=torch.float32, device=get_accelerator().current_device_name()) - beta = torch.rand((channels), dtype=torch.float32, device=get_accelerator().current_device_name()) - epsilon = 1e-5 - - # Reference output - ref_output = reference_implementation(residual, hidden_states, gamma, beta, epsilon) - - # New output - post_ln_module = DSPostNormRegistry.instantiate_config(bundle) - gamma = post_ln_module.transform_param(gamma) - beta = post_ln_module.transform_param(beta) - ds_output, _ = post_ln_module(residual, hidden_states, gamma, beta) - - # Check - assert allclose(ds_output, ref_output) diff --git a/tests/unit/inference/modules/test_pre_rms_module.py b/tests/unit/inference/modules/test_pre_rms_module.py deleted file mode 100644 index bbbec2d15709..000000000000 --- a/tests/unit/inference/modules/test_pre_rms_module.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (c) Microsoft Corporation. 
-# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -from typing import Optional, Tuple - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.modules import ConfigBundle -from deepspeed.inference.v2.modules.configs import DSNormConfig -from deepspeed.inference.v2.modules.interfaces import DSPreNormRegistry -from ..inference_test_utils import get_dtypes, allclose - - -def reference_implementation(residual: torch.Tensor, hidden_states: Optional[torch.Tensor], gamma: torch.Tensor, - epsilon: float) -> Tuple[torch.Tensor, torch.Tensor]: - dtype = residual.dtype - - if hidden_states is not None: - hidden_states = hidden_states - residual = residual + hidden_states - - rms_vals = residual.to(torch.float32) - variance = rms_vals.pow(2).mean(-1, keepdim=True) - rms_vals = rms_vals * torch.rsqrt(variance + epsilon) - - if gamma.dtype in [torch.float16, torch.bfloat16]: - rms_vals = rms_vals.to(gamma.dtype) - - hidden_states = gamma * rms_vals - - return residual.to(dtype), hidden_states.to(dtype) - - -def _pre_rms_test_helper(n_tokens: int, n_channels: int, dtype: torch.dtype, res_add: bool = False): - config = DSNormConfig(max_tokens=2048, - type="rms_norm", - channels=n_channels, - residual_dtype=dtype, - input_dtype=dtype, - output_dtype=dtype, - eps=1e-5) - bundle = ConfigBundle(name='cuda_pre_rms', config=config) - - # Input vals - if res_add: - hidden_states = torch.randn((n_tokens, n_channels), - dtype=dtype, - device=get_accelerator().current_device_name()) - else: - hidden_states = None - - residual = torch.randn((n_tokens, n_channels), dtype=dtype, device=get_accelerator().current_device_name()) - gamma = torch.randn((n_channels), dtype=torch.float32, device=get_accelerator().current_device_name()) - epsilon = 1e-5 - - # Reference output - ref_residual, ref_output = reference_implementation(residual, hidden_states, gamma, epsilon) - - # New output - pre_ln_module = DSPreNormRegistry.instantiate_config(bundle) - gamma = pre_ln_module.transform_param(gamma) - - ds_residual, ds_output = pre_ln_module(residual, hidden_states, gamma) - - # Check - assert allclose(ds_residual, ref_residual) - assert allclose(ds_output, ref_output) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("tokens, channels", [(1, 2048), (37, 8192), (1280, 768), (2048, 5120)]) -def test_token_channels(tokens: int, channels: int) -> None: - _pre_rms_test_helper(tokens, channels, torch.float16) - - -@pytest.mark.inference_v2_ops -@pytest.mark.parametrize("dtype", get_dtypes(include_float=False)) -def test_dtype(dtype: torch.dtype) -> None: - _pre_rms_test_helper(733, 2560, dtype) - - -@pytest.mark.inference_v2_ops -def test_no_res_add(): - _pre_rms_test_helper(733, 2560, torch.float16, res_add=False) diff --git a/tests/unit/inference/ragged/__init__.py b/tests/unit/inference/ragged/__init__.py deleted file mode 100644 index 208299fb8c50..000000000000 --- a/tests/unit/inference/ragged/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team diff --git a/tests/unit/inference/ragged/test_blocked_allocator.py b/tests/unit/inference/ragged/test_blocked_allocator.py deleted file mode 100644 index 4596e81c5652..000000000000 --- a/tests/unit/inference/ragged/test_blocked_allocator.py +++ /dev/null @@ -1,166 +0,0 @@ -# Copyright (c) Microsoft Corporation. 
-# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import random -from typing import List - -import pytest -import torch - -from deepspeed.inference.v2.ragged.blocked_allocator import BlockedAllocator - - -@pytest.mark.inference_v2 -@pytest.mark.parametrize('bad_size', [0, -1]) -def test_bad_initialization(bad_size: int) -> None: - with pytest.raises(ValueError): - BlockedAllocator(bad_size) - - -@pytest.mark.inference_v2 -def test_allocation() -> None: - - allocator = BlockedAllocator(16) - - a1 = allocator.allocate(4) - assert a1.numel() == 4 - assert allocator.free_blocks == 12 - - a2_allocs = [] - for i in range(3): - a2_allocs.append(allocator.allocate(2)) - assert allocator.free_blocks == 12 - (i + 1) * 2 - - a3 = allocator.allocate(6) - assert a3.numel() == 6 - - assert allocator.free_blocks == 0 - - # Test that we can't allocate more blocks than we have. - with pytest.raises(ValueError): - allocator.allocate(1) - - all_vals = torch.cat([a1, *a2_allocs, a3], dim=0) - unique_vals = torch.unique(all_vals, sorted=False) - assert unique_vals.numel() == all_vals.numel() - - -@pytest.mark.inference_v2 -def test_too_large_allocation(): - allocator = BlockedAllocator(16) - - with pytest.raises(ValueError): - allocator.allocate(17) - - -@pytest.mark.inference_v2 -def test_deallocation() -> None: - allocator = BlockedAllocator(16) - - # Allocate - all_blocks = allocator.allocate(16) - assert allocator.free_blocks == 0 - - # Deallocate all blocks - allocator.free(all_blocks) - assert allocator.free_blocks == 16 - - # Get all the blocks again - all_blocks = allocator.allocate(16) - - # Deallocate in chunks - c1 = all_blocks[:4] - c2 = all_blocks[4:8] - - allocator.free(c1) - assert allocator.free_blocks == 4 - - allocator.free(c2) - assert allocator.free_blocks == 8 - - with pytest.raises(ValueError): - allocator.free(c1) - - with pytest.raises(ValueError): - allocator.free(c2) - - -@pytest.mark.inference_v2 -@pytest.mark.parametrize('index', [-1, 2]) -def test_invalid_dealloc_indices(index: int): - allocator = BlockedAllocator(1) - - with pytest.raises(ValueError): - allocator.free(torch.tensor([index])) - - -@pytest.mark.inference_v2 -@pytest.mark.parametrize('index', [-1, 2]) -def test_invalid_alloc_indices(index: int): - allocator = BlockedAllocator(1) - allocator.allocate(1) - - to_free = [0, index] - - with pytest.raises(ValueError): - allocator.free(torch.tensor(to_free)) - - # Block 0 should not be freed if passed with an invalid index. - assert allocator.free_blocks == 0 - - allocator.free(torch.tensor([0])) - assert allocator.free_blocks == 1 - - -@pytest.mark.inference_v2 -@pytest.mark.parametrize('test_iters', [8192]) -def test_long_running_allocation(test_iters: int) -> None: - """ - Evaluate the stability of the allocator over a longer sequence of allocations/deallocations. 
- """ - TOTAL_BLOCKS = 128 - - allocator = BlockedAllocator(TOTAL_BLOCKS) - - def validate_uniqueness(all_blocks: List[torch.Tensor]) -> None: - all_vals = torch.cat(all_blocks, dim=0) - assert all_vals.numel() <= TOTAL_BLOCKS - - unique_vals = torch.unique(all_vals, sorted=False) - assert unique_vals.numel() == all_vals.numel() - - all_allocs: List[torch.Tensor] = [] - num_allocs = 0 - num_frees = 0 - num_blocks_allocated = 0 - num_blocks_freed = 0 - - for _ in range(test_iters): - decision = random.randint(0, 1) - - if decision == 0: - blocks_to_allocate = random.randint(1, 24) - if blocks_to_allocate > allocator.free_blocks: - with pytest.raises(ValueError): - allocator.allocate(blocks_to_allocate) - else: - all_allocs.append(allocator.allocate(blocks_to_allocate)) - num_allocs += 1 - num_blocks_allocated += blocks_to_allocate - else: - if len(all_allocs) > 0: - idx = random.randint(0, len(all_allocs) - 1) - allocator.free(all_allocs[idx]) - - num_frees += 1 - num_blocks_freed += all_allocs[idx].numel() - - del all_allocs[idx] - - if len(all_allocs) > 0: - validate_uniqueness(all_allocs) - - assert num_allocs == num_frees + len(all_allocs) - assert num_blocks_allocated == num_blocks_freed + (TOTAL_BLOCKS - allocator.free_blocks) diff --git a/tests/unit/inference/ragged/test_manager_configs.py b/tests/unit/inference/ragged/test_manager_configs.py deleted file mode 100644 index bdd513445ddb..000000000000 --- a/tests/unit/inference/ragged/test_manager_configs.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) Microsoft Corporation. -# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -import pytest - -from pydantic import ValidationError - -from deepspeed.inference.v2.ragged import DSStateManagerConfig - - -@pytest.mark.inference_v2 -def test_negative_max_tracked_sequences() -> None: - with pytest.raises(ValidationError): - DSStateManagerConfig(max_tracked_sequences=-1) - - -@pytest.mark.inference_v2 -def test_zero_max_tracked_sequences() -> None: - with pytest.raises(ValidationError): - DSStateManagerConfig(max_tracked_sequences=0) - - -@pytest.mark.inference_v2 -def test_negative_max_ragged_batch_size() -> None: - with pytest.raises(ValidationError): - DSStateManagerConfig(max_ragged_batch_size=-1) - - -@pytest.mark.inference_v2 -def test_zero_max_ragged_batch_size() -> None: - with pytest.raises(ValidationError): - DSStateManagerConfig(max_ragged_batch_size=0) - - -@pytest.mark.inference_v2 -def test_negative_max_ragged_sequence_count() -> None: - with pytest.raises(ValidationError): - DSStateManagerConfig(max_ragged_sequence_count=-1) - - -@pytest.mark.inference_v2 -def test_zero_max_ragged_sequence_count() -> None: - with pytest.raises(ValidationError): - DSStateManagerConfig(max_ragged_sequence_count=0) - - -@pytest.mark.inference_v2 -def test_too_small_max_ragged_batch_size() -> None: - with pytest.raises(ValidationError): - DSStateManagerConfig(max_ragged_batch_size=512, max_ragged_sequence_count=1024) - - -@pytest.mark.inference_v2 -def test_too_small_max_tracked_sequences() -> None: - with pytest.raises(ValidationError): - DSStateManagerConfig(max_tracked_sequences=512, max_ragged_sequence_count=1024) diff --git a/tests/unit/inference/ragged/test_ragged_wrapper.py b/tests/unit/inference/ragged/test_ragged_wrapper.py deleted file mode 100644 index 3cb74f4c49d2..000000000000 --- a/tests/unit/inference/ragged/test_ragged_wrapper.py +++ /dev/null @@ -1,112 +0,0 @@ -# Copyright (c) Microsoft Corporation. 
-# SPDX-License-Identifier: Apache-2.0 - -# DeepSpeed Team - -from typing import List - -import pytest -import torch - -from deepspeed.accelerator import get_accelerator -from deepspeed.inference.v2.ragged import ( - PlaceholderSequenceDescriptor, - RaggedBatchWrapper, - DSStateManagerConfig, -) - - -@pytest.mark.inference_v2 -@pytest.mark.parametrize('max_ragged_sequence_count, max_ragged_batch_size', [(128, 512), (128, 1024)]) -def test_wrapper_initialization(max_ragged_sequence_count: int, max_ragged_batch_size: int) -> None: - config = DSStateManagerConfig(max_tracked_sequences=max_ragged_sequence_count, - max_ragged_batch_size=max_ragged_batch_size, - max_ragged_sequence_count=max_ragged_sequence_count) - - batch = RaggedBatchWrapper(config) - - assert batch.current_tokens == 0 - assert batch.current_sequences == 0 - - -@pytest.mark.inference_v2 -@pytest.mark.parametrize('seq_len', [1, 37, 128, 512]) -def test_single_sequence_batch(seq_len: int) -> None: - """ - Test we successfully construct single sequence batches and the on device metadata is accurate. - """ - - config = DSStateManagerConfig() - batch = RaggedBatchWrapper(config) - - batch.clear() - - assert batch.current_tokens == 0 - assert batch.current_sequences == 0 - - seq_desc = PlaceholderSequenceDescriptor() - tokens = torch.randint(0, 100, (seq_len, )) - batch.insert_sequence(seq_desc, tokens) - - batch.finalize() - - assert batch.current_tokens == seq_len - assert batch.current_sequences == 1 - assert torch.equal(batch.input_ids(), tokens.to(get_accelerator().current_device())) - assert torch.equal(batch.tokens_to_seq(), torch.zeros_like(tokens, device=get_accelerator().current_device())) - assert torch.equal(batch.batch_metadata_buffer(), - torch.tensor([seq_len, 1], device=get_accelerator().current_device())) - - batch.clear() - - assert batch.current_tokens == 0 - assert batch.current_sequences == 0 - - -@pytest.mark.inference_v2 -@pytest.mark.parametrize('seq_lens', [[128, 128], [1, 32, 243], [64, 1, 1, 1, 1, 393, 27, 2]]) -def test_multi_sequence_batch(seq_lens: List[int]) -> None: - """ - Test sequentially adding new tokens to a batch and validate device data structures hold - the appropriate data. 
- """ - config = DSStateManagerConfig() - batch = RaggedBatchWrapper(config) - - batch.clear() - - assert batch.current_tokens == 0 - assert batch.current_sequences == 0 - - all_toks = [torch.randint(0, 100, (seq_len, )) for seq_len in seq_lens] - - for i, toks in enumerate(all_toks): - seq_desc = PlaceholderSequenceDescriptor() - batch.insert_sequence(seq_desc, toks) - - assert batch.current_tokens == sum(seq_lens[:i + 1]) - assert batch.current_sequences == i + 1 - - batch.finalize() - - assert batch.current_tokens == sum(seq_lens) - assert batch.current_sequences == len(seq_lens) - - assert torch.equal(batch.input_ids(), torch.cat(all_toks, dim=0).to(get_accelerator().current_device())) - assert torch.equal( - batch.tokens_to_seq(), - torch.cat([torch.full((seq_len, ), i, dtype=torch.int32) for i, seq_len in enumerate(seq_lens)], - dim=0).to(get_accelerator().current_device())) - - for i, seq_len in enumerate(seq_lens): - assert batch.inflight_seq_descriptors()[i][0] == sum(seq_lens[:i]) - assert batch.inflight_seq_descriptors()[i][1] == seq_len - assert batch.inflight_seq_descriptors()[i][2] == 0 - - assert torch.equal(batch.batch_metadata_buffer(), - torch.tensor([sum(seq_lens), len(seq_lens)], device=get_accelerator().current_device())) - - batch.clear() - - assert batch.current_tokens == 0 - assert batch.current_sequences == 0 diff --git a/tests/unit/inference/v2/model_implementations/parameters/test_contiguify.py b/tests/unit/inference/v2/model_implementations/parameters/test_contiguify.py new file mode 100644 index 000000000000..52ff0e134dfc --- /dev/null +++ b/tests/unit/inference/v2/model_implementations/parameters/test_contiguify.py @@ -0,0 +1,120 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import List + +import pytest +import torch + +from deepspeed.accelerator import get_accelerator +from deepspeed.inference.v2.model_implementations.flat_model_helpers import ( + flatten_inference_model, + restore_inference_model, +) +from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer +from .utils import SimpleParam, DummyInferenceModel + + +class TransformerLayerContainer(LayerContainer): + """ + Stub layer container + """ + PARAM_MAPPING = { + "param_1": "param_1.param", + "param_2": "param_2.param", + } + + param_1: SimpleParam + + param_2: SimpleParam + + +class NonTransformerContainer(LayerContainer): + """ + Stub layer container + """ + PARAM_MAPPING = { + "param_1": "param_1.param", + "param_2": "param_2.param", + "param_3": "param_3.param", + } + + param_1: SimpleParam + + param_2: SimpleParam + + param_3: SimpleParam + + +@pytest.mark.inference_v2 +def test_contiguify_roundtrip(): + """ + Validate that contiguify round trips and reconstructions are correct. 
+ """ + model = DummyInferenceModel() + + n_layers = 2 + transformer_params = [] + transformer_containers = [] + + # Create parameters and populate them into the containers + for i in range(n_layers): + transformer_containers.append(TransformerLayerContainer(model)) + layer_params = [] + for j in range(2): + layer_params.append(torch.rand(16, 16)) + transformer_containers[i].set_dependency(f"param_{j+1}", layer_params[j]) + + layer_params = [p.to(get_accelerator().current_device()) for p in layer_params] + + transformer_params.append(layer_params) + assert transformer_containers[i].is_populated == True + + non_transformer_params = [] + non_transformer_container = NonTransformerContainer(model) + + for i in range(3): + non_transformer_params.append(torch.rand(16, 16).permute(1, 0)) + non_transformer_container.set_dependency(f"param_{i+1}", non_transformer_params[i]) + + non_transformer_params = [p.to(get_accelerator().current_device()) for p in non_transformer_params] + + def validate_containers(t_containers: List[LayerContainer], n_t_containers: LayerContainer, + t_params: List[List[torch.Tensor]], n_t_params: List[torch.Tensor]): + """ + Validate params match what is on the containers. + """ + for i in range(n_layers): + l_c = t_containers[i] + + assert l_c.is_initialized == True + + assert torch.equal(l_c.param_1, t_params[i][0]) + assert torch.equal(l_c.param_2, t_params[i][1]) + + assert n_t_containers.is_initialized == True + assert torch.equal(n_t_containers.param_1, n_t_params[0]) + assert torch.equal(n_t_containers.param_2, n_t_params[1]) + assert torch.equal(n_t_containers.param_3, n_t_params[2]) + assert not n_t_containers.param_1.is_contiguous() + assert not n_t_containers.param_2.is_contiguous() + assert not n_t_containers.param_3.is_contiguous() + + buffer, metadata = flatten_inference_model(transformer_containers, non_transformer_container, "NoOpPolicy") + + # Validate containers before contiguify + validate_containers(transformer_containers, non_transformer_container, transformer_params, non_transformer_params) + + # Validate restore pass + transformer_containers_r = [] + for i in range(n_layers): + transformer_containers_r.append(TransformerLayerContainer(model)) + + non_transformer_container_r = NonTransformerContainer(model) + + restore_inference_model(buffer, metadata, transformer_containers_r, non_transformer_container_r) + + validate_containers(transformer_containers_r, non_transformer_container_r, transformer_params, + non_transformer_params) diff --git a/tests/unit/inference/v2/model_implementations/parameters/test_layer_inheritance.py b/tests/unit/inference/v2/model_implementations/parameters/test_layer_inheritance.py index 20803e53a320..07ad87e6168d 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/test_layer_inheritance.py +++ b/tests/unit/inference/v2/model_implementations/parameters/test_layer_inheritance.py @@ -6,9 +6,10 @@ import pytest import torch +from deepspeed.inference.v2.inference_parameter import InferenceParameter from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer -from .utils import validate_device, SimpleParam, DummyInferenceModel +from .utils import SimpleParam, DummyInferenceModel class ParentLayer(LayerContainer): @@ -42,9 +43,6 @@ def test_layer_inheritance(): multi_param_layer.param_2.param = torch.full((16, 16), 2.0) - assert multi_param_layer.is_initialized is True - assert isinstance(multi_param_layer.param_1, torch.Tensor) - assert isinstance(multi_param_layer.param_2, 
torch.Tensor) - - validate_device(multi_param_layer.param_1) - validate_device(multi_param_layer.param_2) + assert multi_param_layer.is_populated is True + assert isinstance(multi_param_layer.param_1, InferenceParameter) + assert isinstance(multi_param_layer.param_2, InferenceParameter) diff --git a/tests/unit/inference/v2/model_implementations/parameters/test_mapping.py b/tests/unit/inference/v2/model_implementations/parameters/test_mapping.py index 3c74d7a0479a..52313cb6f202 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/test_mapping.py +++ b/tests/unit/inference/v2/model_implementations/parameters/test_mapping.py @@ -7,6 +7,7 @@ import torch from deepspeed.inference.v2.allocator import on_device +from deepspeed.inference.v2.inference_parameter import InferenceParameter from deepspeed.inference.v2.model_implementations.parameter_base import ParameterBase, ParamList from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer @@ -19,7 +20,8 @@ class MultiDependencyContainer(ParameterBase): @on_device def finalize(self) -> torch.Tensor: - return torch.cat([self.dependency_1, self.dependency_2]) + param = torch.cat([self.dependency_1, self.dependency_2]) + return InferenceParameter.initialize(param) class ListDependencyContainer(ParameterBase): @@ -28,7 +30,8 @@ class ListDependencyContainer(ParameterBase): @on_device def finalize(self) -> torch.Tensor: - return torch.cat(tuple(self.dependencies)) + param = torch.cat(tuple(self.dependencies)) + return InferenceParameter.initialize(param) class MappingLayer(LayerContainer): @@ -81,10 +84,10 @@ def test_mapping_syntax(): for i in range(16): mapping_layer.set_dependency(f"model.list_vals.{i}.d", torch.ones(1) * i) if i != 16 - 1: - assert mapping_layer.is_initialized == False + assert mapping_layer.is_populated == False - assert isinstance(mapping_layer.list_depend, torch.Tensor) - assert mapping_layer.is_initialized == True + assert isinstance(mapping_layer.list_depend, InferenceParameter) + assert mapping_layer.is_populated == True @pytest.mark.inference_v2 @@ -96,22 +99,22 @@ def test_sub_mapping_syntax(): mapping_layer.set_dependency("model.val.item.d_1", torch.ones(1)) mapping_layer.set_dependency("model.val.item.d_2", torch.ones(1) * 2) - assert isinstance(mapping_layer.multi_depend, torch.Tensor) + assert isinstance(mapping_layer.multi_depend, InferenceParameter) mapping_layer.set_dependency("model.val.item2.d_1", torch.ones(1)) mapping_layer.set_dependency("model.val.item2.d_2", torch.ones(1) * 2) - assert isinstance(mapping_layer.multi_depend2, torch.Tensor) + assert isinstance(mapping_layer.multi_depend2, InferenceParameter) # We want to check into double digits to make sure that this isn't specific # to single difit indexing. for i in range(16): mapping_layer.set_dependency(f"model.list_vals.{i}.d", torch.ones(1) * i) if i != 16 - 1: - assert mapping_layer.is_initialized == False + assert mapping_layer.is_populated == False - assert isinstance(mapping_layer.list_depend, torch.Tensor) - assert mapping_layer.is_initialized == True + assert isinstance(mapping_layer.list_depend, InferenceParameter) + assert mapping_layer.is_populated == True @pytest.mark.inference_v2 @@ -123,8 +126,8 @@ def test_double_mapping_syntax(): # The single parameter setting should immediately make the parameter finalized # and the whole layer initialized. 
- assert isinstance(mapping_layer.multi_depend, torch.Tensor) - assert mapping_layer.is_initialized == True + assert isinstance(mapping_layer.multi_depend, InferenceParameter) + assert mapping_layer.is_populated == True @pytest.mark.inference_v2 diff --git a/tests/unit/inference/v2/model_implementations/parameters/test_multi_parameter_layer.py b/tests/unit/inference/v2/model_implementations/parameters/test_multi_parameter_layer.py index 6bfc04e97c30..b319bf6de4ad 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/test_multi_parameter_layer.py +++ b/tests/unit/inference/v2/model_implementations/parameters/test_multi_parameter_layer.py @@ -6,6 +6,7 @@ import pytest import torch +from deepspeed.inference.v2.inference_parameter import InferenceParameter from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer from .utils import validate_device, SimpleParam, ListParam, DummyInferenceModel @@ -38,20 +39,17 @@ def test_multi_parameter_layer(): multi_param_layer = MultiParameterLayer(inference_model) assert multi_param_layer.n_params == 2 - assert multi_param_layer.is_initialized is False + assert multi_param_layer.is_populated is False multi_param_layer.param_1.param = torch.ones(16, 16) - assert multi_param_layer.is_initialized is False + assert multi_param_layer.is_populated is False multi_param_layer.param_2.param = torch.full((16, 16), 2.0) - assert multi_param_layer.is_initialized is True - assert isinstance(multi_param_layer.param_1, torch.Tensor) - assert isinstance(multi_param_layer.param_2, torch.Tensor) - - validate_device(multi_param_layer.param_1) - validate_device(multi_param_layer.param_2) + assert multi_param_layer.is_populated is True + assert isinstance(multi_param_layer.param_1, InferenceParameter) + assert isinstance(multi_param_layer.param_2, InferenceParameter) @pytest.mark.inference_v2 @@ -61,51 +59,21 @@ def test_mixed_multi_parameter_layer(): mixed_multi_param_layer = MixedMultiParameterLayer(inference_model) assert mixed_multi_param_layer.n_params == 2 - assert mixed_multi_param_layer.is_initialized is False + assert mixed_multi_param_layer.is_populated is False mixed_multi_param_layer.param_2.params[1] = torch.full((16, 16), 2.0) - assert mixed_multi_param_layer.is_initialized is False - assert not isinstance(mixed_multi_param_layer.param_2, torch.Tensor) + assert mixed_multi_param_layer.is_populated is False + assert not isinstance(mixed_multi_param_layer.param_2, InferenceParameter) mixed_multi_param_layer.param_1.param = torch.ones(16, 16) - assert mixed_multi_param_layer.is_initialized is False - assert isinstance(mixed_multi_param_layer.param_1, torch.Tensor) + assert mixed_multi_param_layer.is_populated is False + assert isinstance(mixed_multi_param_layer.param_1, InferenceParameter) validate_device(mixed_multi_param_layer.param_1) mixed_multi_param_layer.param_2.params[0] = torch.full((16, 16), 2.0) - assert mixed_multi_param_layer.is_initialized is True - assert isinstance(mixed_multi_param_layer.param_2, torch.Tensor) + assert mixed_multi_param_layer.is_populated is True + assert isinstance(mixed_multi_param_layer.param_2, InferenceParameter) validate_device(mixed_multi_param_layer.param_2) - - -class NoCopyInferenceModel: - - @property - def num_dependencies(self) -> int: - return 2 - - def transform(self, param: torch.Tensor) -> torch.Tensor: - return param - - -@pytest.mark.inference_v2 -def test_device_validation(): - inference_model = NoCopyInferenceModel() - - multi_param_layer = 
MultiParameterLayer(inference_model) - - assert multi_param_layer.n_params == 2 - assert multi_param_layer.is_initialized is False - - multi_param_layer.param_1.param = torch.ones(16, 16) - - assert multi_param_layer.is_initialized is False - - multi_param_layer.param_2.param = torch.full((16, 16), 2.0) - - with pytest.raises(RuntimeError): - # NoCopyInference model did not copy the parameters, so the device validation should fail. - assert multi_param_layer.is_initialized is True diff --git a/tests/unit/inference/v2/model_implementations/parameters/test_parameter_list.py b/tests/unit/inference/v2/model_implementations/parameters/test_parameter_list.py index 42edd90595fa..260236562ee9 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/test_parameter_list.py +++ b/tests/unit/inference/v2/model_implementations/parameters/test_parameter_list.py @@ -6,10 +6,11 @@ import pytest import torch +from deepspeed.inference.v2.allocator import on_device +from deepspeed.inference.v2.inference_parameter import InferenceParameter from deepspeed.inference.v2.model_implementations.parameter_base import ParameterBase, ParamList from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer from deepspeed.inference.v2.model_implementations.common_parameters import * -from deepspeed.inference.v2.allocator import on_device from .utils import validate_device @@ -30,7 +31,7 @@ def num_experts(self) -> int: @on_device def transform_moe_mlp_1_param(self, param: torch.Tensor) -> torch.Tensor: - return param + return InferenceParameter.initialize(param) @pytest.mark.inference_v2 diff --git a/tests/unit/inference/v2/model_implementations/parameters/utils.py b/tests/unit/inference/v2/model_implementations/parameters/utils.py index 0d2cbb27d40e..07d72059f9b3 100644 --- a/tests/unit/inference/v2/model_implementations/parameters/utils.py +++ b/tests/unit/inference/v2/model_implementations/parameters/utils.py @@ -7,6 +7,7 @@ from deepspeed.accelerator import get_accelerator from deepspeed.inference.v2.allocator import on_device +from deepspeed.inference.v2.inference_parameter import InferenceParameter from deepspeed.inference.v2.model_implementations.parameter_base import ParameterBase, ParametrizedList @@ -17,6 +18,7 @@ class SimpleParam(ParameterBase): param: torch.Tensor + @on_device def finalize(self) -> torch.Tensor: return self.inference_model.transform(self.param) @@ -39,6 +41,7 @@ class ListParam(ParameterBase): params: SimpleParametrizedList + @on_device def finalize(self) -> torch.Tensor: return self.inference_model.transform(torch.cat(tuple(self.params))) @@ -49,9 +52,8 @@ class DummyInferenceModel: def num_dependencies(self) -> int: return 2 - @on_device def transform(self, param: torch.Tensor) -> torch.Tensor: - return param + return InferenceParameter.initialize(param) def validate_device(tensor: torch.Tensor): From d187c3f4d1c3664d2d8054c332d30ee5ead5386a Mon Sep 17 00:00:00 2001 From: Connor Holmes Date: Mon, 13 Nov 2023 23:00:11 +0000 Subject: [PATCH 2/6] Duplicate import --- deepspeed/inference/v2/engine_factory.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/deepspeed/inference/v2/engine_factory.py b/deepspeed/inference/v2/engine_factory.py index e2fda85b381d..97fae60b88c9 100644 --- a/deepspeed/inference/v2/engine_factory.py +++ b/deepspeed/inference/v2/engine_factory.py @@ -7,7 +7,6 @@ import logging import os import pickle -from typing import Any from packaging import version from .engine_v2 import InferenceEngineV2 @@ -96,7 +95,6 @@ def 
build_hf_engine(path: str, elif model_config.model_type == "llama": policy = Llama2Policy(model_config, checkpoint_engine=checkpoint_engine) elif model_config.model_type == "mistral": - from .model_implementations.mistral.policy import MistralPolicy # Ensure we're using the correct version of transformers for mistral import transformers assert version.parse(transformers.__version__) >= version.parse("4.34.0"), \ From 3a6e3628f72eea0922d146f6922951616ab7627b Mon Sep 17 00:00:00 2001 From: Connor Holmes Date: Mon, 13 Nov 2023 17:55:04 -0800 Subject: [PATCH 3/6] Update engine_factory.py --- deepspeed/inference/v2/engine_factory.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deepspeed/inference/v2/engine_factory.py b/deepspeed/inference/v2/engine_factory.py index 97fae60b88c9..244000be0e7f 100644 --- a/deepspeed/inference/v2/engine_factory.py +++ b/deepspeed/inference/v2/engine_factory.py @@ -22,7 +22,7 @@ from .model_implementations.flat_model_helpers import make_metadata_filename, ModelMetadata -def buid_engine_from_ds_checkpoint(path: str, +def build_engine_from_ds_checkpoint(path: str, engine_config: RaggedInferenceEngineConfig, debug_level: int = logging.INFO) -> InferenceEngineV2: """ @@ -78,7 +78,7 @@ def build_hf_engine(path: str, """ if os.path.exists(os.path.join(path, "ds_model_config.pkl")): - return buid_engine_from_ds_checkpoint(path, engine_config, debug_level=debug_level) + return build_engine_from_ds_checkpoint(path, engine_config, debug_level=debug_level) else: # Set up logging inference_logger(level=debug_level) From 36c2652deeae90f8fdb8ab6020b5ead1e867769b Mon Sep 17 00:00:00 2001 From: Connor Holmes Date: Tue, 14 Nov 2023 01:58:58 +0000 Subject: [PATCH 4/6] Formatting --- deepspeed/inference/v2/engine_factory.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deepspeed/inference/v2/engine_factory.py b/deepspeed/inference/v2/engine_factory.py index 244000be0e7f..8ff75cc52213 100644 --- a/deepspeed/inference/v2/engine_factory.py +++ b/deepspeed/inference/v2/engine_factory.py @@ -23,8 +23,8 @@ def build_engine_from_ds_checkpoint(path: str, - engine_config: RaggedInferenceEngineConfig, - debug_level: int = logging.INFO) -> InferenceEngineV2: + engine_config: RaggedInferenceEngineConfig, + debug_level: int = logging.INFO) -> InferenceEngineV2: """ Creates an engine from a checkpoint saved by ``InferenceEngineV2``. 
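
For context, a minimal usage sketch of the two factory entry points touched in the commits above. This is a sketch only: the checkpoint paths are hypothetical, it assumes RaggedInferenceEngineConfig can be constructed with defaults, and it assumes build_hf_engine accepts the engine_config/debug_level keywords that appear in its body in this diff; only build_engine_from_ds_checkpoint's full signature is shown here.

import logging

from deepspeed.inference.v2 import RaggedInferenceEngineConfig
from deepspeed.inference.v2.engine_factory import (
    build_engine_from_ds_checkpoint,
    build_hf_engine,
)

engine_config = RaggedInferenceEngineConfig()  # assumption: default construction is acceptable

# Hugging Face checkpoint directory (hypothetical path). Per the diff, build_hf_engine
# checks for ds_model_config.pkl and forwards to build_engine_from_ds_checkpoint when
# that file is present, so one entry point covers both layouts.
hf_engine = build_hf_engine("/checkpoints/hf-model",
                            engine_config=engine_config,
                            debug_level=logging.INFO)

# Directory saved by InferenceEngineV2 (hypothetical path), loaded directly.
ds_engine = build_engine_from_ds_checkpoint("/checkpoints/ds-model",
                                            engine_config,
                                            debug_level=logging.INFO)
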
From 79b14e883be56956c8665a2bcbbc11cfc1ebd21a Mon Sep 17 00:00:00 2001 From: Connor Holmes Date: Tue, 14 Nov 2023 02:19:37 +0000 Subject: [PATCH 5/6] Finish propagating the rename --- deepspeed/inference/v2/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deepspeed/inference/v2/__init__.py b/deepspeed/inference/v2/__init__.py index c7b91db08462..ac8a42da8ab3 100644 --- a/deepspeed/inference/v2/__init__.py +++ b/deepspeed/inference/v2/__init__.py @@ -4,4 +4,4 @@ # DeepSpeed Team from .config_v2 import RaggedInferenceEngineConfig, DeepSpeedTPConfig from .engine_v2 import InferenceEngineV2 -from .engine_factory import build_hf_engine, buid_engine_from_ds_checkpoint +from .engine_factory import build_hf_engine, build_engine_from_ds_checkpoint From f5b9b79cc230953ac8c3ad90a25f41bde6b4a1e9 Mon Sep 17 00:00:00 2001 From: Connor Holmes Date: Tue, 14 Nov 2023 02:22:28 +0000 Subject: [PATCH 6/6] Finish propagating the rename v2 --- deepspeed/inference/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deepspeed/inference/__init__.py b/deepspeed/inference/__init__.py index 0ee72fa36975..cdd00fec935b 100644 --- a/deepspeed/inference/__init__.py +++ b/deepspeed/inference/__init__.py @@ -4,4 +4,4 @@ # DeepSpeed Team from .v2 import RaggedInferenceEngineConfig, DeepSpeedTPConfig from .v2.engine_v2 import InferenceEngineV2 -from .v2 import build_hf_engine, buid_engine_from_ds_checkpoint +from .v2 import build_hf_engine, build_engine_from_ds_checkpoint
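
As a companion to the new test_contiguify.py earlier in this patch, a condensed sketch of the flatten/restore round trip provided by flat_model_helpers. SimpleParam and DummyInferenceModel are reproduced in abbreviated form from the test-local utils.py shown above; StubContainer and its parameter names are hypothetical stand-ins, "NoOpPolicy" mirrors the policy label the test passes, and an accelerator device is assumed to be available (as in the test).

import torch

from deepspeed.inference.v2.allocator import on_device
from deepspeed.inference.v2.inference_parameter import InferenceParameter
from deepspeed.inference.v2.model_implementations.flat_model_helpers import (
    flatten_inference_model,
    restore_inference_model,
)
from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer
from deepspeed.inference.v2.model_implementations.parameter_base import ParameterBase


class SimpleParam(ParameterBase):
    # Abbreviated from the test-local helper: a single dependency, moved on-device at finalize.
    param: torch.Tensor

    @on_device
    def finalize(self) -> torch.Tensor:
        return self.inference_model.transform(self.param)


class DummyInferenceModel:
    # Abbreviated from the test-local helper: wraps finalized tensors as InferenceParameters.
    def transform(self, param: torch.Tensor) -> torch.Tensor:
        return InferenceParameter.initialize(param)


class StubContainer(LayerContainer):
    # Hypothetical two-parameter container in the style of the test's stub containers.
    PARAM_MAPPING = {
        "w1": "w1.param",
        "w2": "w2.param",
    }

    w1: SimpleParam
    w2: SimpleParam


model = DummyInferenceModel()

# Populate two "transformer" containers and one "non-transformer" container.
transformer_containers = []
for _ in range(2):
    container = StubContainer(model)
    container.set_dependency("w1", torch.rand(16, 16))
    container.set_dependency("w2", torch.rand(16, 16))
    transformer_containers.append(container)

non_transformer_container = StubContainer(model)
non_transformer_container.set_dependency("w1", torch.rand(16, 16))
non_transformer_container.set_dependency("w2", torch.rand(16, 16))

# Flatten everything into one contiguous buffer plus metadata describing its layout.
buffer, metadata = flatten_inference_model(transformer_containers,
                                           non_transformer_container,
                                           "NoOpPolicy")

# Fresh, unpopulated containers are rebuilt directly from the flat buffer.
restored_transformers = [StubContainer(model) for _ in range(2)]
restored_non_transformer = StubContainer(model)
restore_inference_model(buffer, metadata, restored_transformers, restored_non_transformer)
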