
Commit fdbdcee
Fix bug to load model to CPU when chosen by user.
NathanHB committed Oct 31, 2024
1 parent 2d8cc71 commit fdbdcee
Showing 2 changed files with 5 additions and 3 deletions.
6 changes: 4 additions & 2 deletions lm_eval/models/huggingface.py
@@ -170,6 +170,7 @@ def __init__(
         if device and device in device_list:
             self._device = torch.device(device)
             eval_logger.info(f"Using device '{device}'")
+            print(f"Using device '{device}'")
             if device in ("mps", "mps:0") and version.parse(
                 torch.__version__
             ) < version.parse("2.1"):
@@ -239,14 +240,15 @@ def __init__(
         self.model.eval()
         self.model.tie_weights()

-        if isinstance(pretrained, str) and (gpus >= 1 or str(self.device) == "mps"):
+        if isinstance(pretrained, str) and (gpus >= 1 or str(self.device) in ["mps", "cpu"]):
             # TODO: can remove this whole snippet except in the mps case, perhaps?
-            if not (parallelize or autogptq or hasattr(self, "accelerator")):
+            if not (parallelize or autogptq):
                 # place model onto device requested manually,
                 # if not using HF Accelerate or device_map
                 # or any other option that preloads model onto device
                 try:
                     self.model.to(self.device)
+                    print(f"Model placed onto device '{self.device}'")
                 except ValueError:
                     eval_logger.debug(
                         "Failed to place model onto specified device. This may be because the model is quantized via `bitsandbytes` or `device_map` is provided. If the desired GPU is being used, this message is safe to ignore."
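Why this matters: before the change, the guard around the manual device placement was only true for GPU or MPS runs, so when a user chose device="cpu" the self.model.to(self.device) call was skipped and the model stayed wherever from_pretrained had put it. Below is a minimal standalone sketch of the condition change; gpus, device, and pretrained are stand-ins for the state inside HFLM.__init__, not the harness code itself.

# Minimal sketch, assuming a CPU-only run (no GPUs, device="cpu").
gpus = 0
device = "cpu"
pretrained = "EleutherAI/pythia-70m"

# Old guard: False on CPU, so model.to(device) was never reached.
old_guard = isinstance(pretrained, str) and (gpus >= 1 or device == "mps")
# New guard: "cpu" is included, so the model is placed explicitly.
new_guard = isinstance(pretrained, str) and (gpus >= 1 or device in ["mps", "cpu"])
print(old_guard, new_guard)  # False True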
2 changes: 1 addition & 1 deletion tests/models/test_huggingface.py
@@ -100,7 +100,7 @@ class Test_HFLM:
         -45969.47155761719,
         -7158.90625,
     ]
-    LM = HFLM(pretrained="EleutherAI/pythia-70m", device="cpu", dtype="bfloat16")
+    LM = HFLM(pretrained="EleutherAI/pythia-70m", device="cpu", dtype="float32")

     def test_logliklihood(self) -> None:
         res = self.LM.loglikelihood(self.MULTIPLE_CH)
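For reference, the fixed path can be exercised the same way the updated test does. A minimal sketch, assuming HFLM is importable from lm_eval.models.huggingface as in the test file; the expected printout assumes the device property reflects the torch.device set in __init__.

from lm_eval.models.huggingface import HFLM

# With the fix, choosing device="cpu" explicitly places the weights on CPU.
lm = HFLM(pretrained="EleutherAI/pythia-70m", device="cpu", dtype="float32")
print(lm.device)  # expected: cpu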
