From ee3a4f068c7646ac2d645e7ad02e82c82c5238bf Mon Sep 17 00:00:00 2001
From: wangyicheng
Date: Fri, 15 Nov 2024 06:34:32 +0000
Subject: [PATCH] [FSDP2] Support FSDP2 on privateuse1 devices (#139539)

We would like FSDP2 to support devices other than CUDA. This removes the
hard-coded device-type check in `FSDPCommContext.lazy_init` so that any
backend whose device module provides stream support (e.g. a privateuse1
backend) can use FSDP2.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/139539
Approved by: https://github.com/kwen2501
---
 torch/distributed/_composable/fsdp/_fsdp_param_group.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/torch/distributed/_composable/fsdp/_fsdp_param_group.py b/torch/distributed/_composable/fsdp/_fsdp_param_group.py
index cb2269215db79..15e9165b1d1d6 100644
--- a/torch/distributed/_composable/fsdp/_fsdp_param_group.py
+++ b/torch/distributed/_composable/fsdp/_fsdp_param_group.py
@@ -51,8 +51,6 @@ class FSDPCommContext:
 
     def lazy_init(self, device: torch.device):
         self.device_handle = _get_device_handle(device.type)
-        if device.type not in ["cuda", "hpu"]:
-            raise RuntimeError("FSDP requires streams support")
         # Setting the all-gather/reduce-scatter streams to be higher priority
         # can help avoid some issues where their copies in/out are delayed and
         # block computation (this is different from high-pri NCCL streams)
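
For context, a minimal sketch of the code path this change opens up. It is
shown with "cuda" and assumes a CUDA build; `comm_stream` is an illustrative
name. A renamed privateuse1 backend would follow the same path, provided its
device module (registered via `torch._register_device_module`) exposes the
same stream APIs:

```python
import torch
from torch.distributed.device_mesh import _get_device_handle

# lazy_init resolves a per-backend device module: torch.cuda, torch.hpu,
# or a privateuse1 module registered via torch._register_device_module.
device = torch.device("cuda")
handle = _get_device_handle(device.type)

# FSDPCommContext then allocates high-priority streams from that handle
# for the all-gather/reduce-scatter copies. With the hard-coded
# ["cuda", "hpu"] check removed, any backend whose device module
# provides this Stream API can reach this point.
comm_stream = handle.Stream(priority=-1)
```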