
Commit

rename grad to gradient
bastiscode committed Jun 25, 2024
1 parent 509017e commit 3f88088
Showing 1 changed file with 8 additions and 8 deletions.
python/text_utils/api/trainer.py (8 additions, 8 deletions)
@@ -245,12 +245,12 @@ def __init__(
             additional_optimizer_fn=self._additional_optimizer_fn()
         )

-        self.clip_grad_norm: float | None = self.cfg["train"].get(
-            "clip_grad_norm",
+        self.clip_gradient_norm: float | None = self.cfg["train"].get(
+            "clip_gradient_norm",
             None
         )
-        self.grad_accumulation = max(1, self.cfg["train"].get(
-            "grad_accumulation",
+        self.gradient_accumulation = max(1, self.cfg["train"].get(
+            "gradient_accumulation",
             1
         ))
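
For context, the renamed options live under the "train" section of the trainer config. A minimal sketch of what that section might look like after this commit; the surrounding structure and the example values are assumptions, only the two key names and defaults come from the diff:

# Hypothetical "train" config section after the rename; values are illustrative.
cfg = {
    "train": {
        "clip_gradient_norm": 1.0,    # formerly "clip_grad_norm"; omit or None to disable clipping
        "gradient_accumulation": 4,   # formerly "grad_accumulation"; defaults to 1
    }
}

clip_gradient_norm = cfg["train"].get("clip_gradient_norm", None)
gradient_accumulation = max(1, cfg["train"].get("gradient_accumulation", 1))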

@@ -1029,7 +1029,7 @@ def _train_one_epoch(self):
             min_size = sys.maxsize
             max_size = 0
             batches = []
-            for i in range(self.grad_accumulation):
+            for i in range(self.gradient_accumulation):
                 batch = next(train_iter, None)
                 if batch is None:
                     break
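
The loop above pulls up to gradient_accumulation batches from the training iterator before a single optimizer step. Below is a minimal standalone sketch of that accumulation pattern with assumed model and loss names; the repository's actual loss computation and mixed-precision handling are omitted:

import torch

def accumulate_step(model, optimizer, train_iter, gradient_accumulation: int):
    # Collect up to `gradient_accumulation` batches; the iterator may run out early.
    batches = []
    for _ in range(gradient_accumulation):
        batch = next(train_iter, None)
        if batch is None:
            break
        batches.append(batch)
    if not batches:
        return 0
    optimizer.zero_grad()
    for inputs, labels in batches:
        loss = torch.nn.functional.cross_entropy(model(inputs), labels)
        # Scale by the number of accumulated batches so the summed gradients
        # approximate one step over a single large batch.
        (loss / len(batches)).backward()
    optimizer.step()
    return len(batches)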
@@ -1091,14 +1091,14 @@ def _train_one_epoch(self):
                 if first_outputs is None:
                     first_outputs = outputs.detach()

-            if self.clip_grad_norm is not None:
+            if self.clip_gradient_norm is not None:
                 self.grad_scaler.unscale_(self.optimizer)
                 if isinstance(self.model, FSDP):
-                    self.model.clip_grad_norm_(self.clip_grad_norm)
+                    self.model.clip_grad_norm_(self.clip_gradient_norm)
                 else:
                     torch.nn.utils.clip_grad_norm_(
                         self.model.parameters(),
-                        self.clip_grad_norm
+                        self.clip_gradient_norm
                     )

             self.grad_scaler.step(self.optimizer)
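
The clipping above follows the usual unscale-then-clip pattern for mixed-precision training: gradients are unscaled before the norm is computed, FSDP models clip through their own clip_grad_norm_ method, and the GradScaler performs the optimizer step. A minimal sketch of the non-FSDP path with assumed names, not the repository's exact code:

import torch

scaler = torch.cuda.amp.GradScaler()

def clipped_step(model, optimizer, loss, clip_gradient_norm=None):
    scaler.scale(loss).backward()
    if clip_gradient_norm is not None:
        # Gradients must be unscaled first, otherwise the norm threshold would
        # be compared against loss-scaled gradients.
        scaler.unscale_(optimizer)
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip_gradient_norm)
    scaler.step(optimizer)   # skips the step if any gradient is inf/NaN
    scaler.update()
    optimizer.zero_grad()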
