Commit 8118ee5
no more prints
dlwh committed May 8, 2024
1 parent 95cd69e · commit 8118ee5
Showing 1 changed file with 0 additions and 5 deletions.
src/levanter/models/attention.py (5 changes: 0 additions & 5 deletions)
@@ -689,10 +689,6 @@ def _tpu_splash_attention(
     k_ = _reshape_axes_for_bshd_bins(key, k_class, output_order=list("BHSD")).array
     v_ = _reshape_axes_for_bshd_bins(value, v_class, output_order=list("BHSD")).array
 
-    # jax.debug.inspect_array_sharding(q_, callback=lambda sharding: print(f"q_: {sharding}"))
-    # jax.debug.inspect_array_sharding(k_, callback=lambda sharding: print(f"k_: {sharding}"))
-    # jax.debug.inspect_array_sharding(v_, callback=lambda sharding: print(f"v_: {sharding}"))
-
     B, Hq, Sq, D = q_.shape
     Bk, Hk, Sk, Dk = k_.shape
 
@@ -798,7 +794,6 @@ def wrap_flash_attention(q, k, v):
         precision,
         prng=prng,
     )
-    print(reference_out_shape.dtype, attn_output.dtype)
     attn_output = attn_output.rearrange(reference_out_shape.axes).astype(reference_out_shape.dtype)
 
     attn_output = haliax.shard(attn_output)
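
For context, the commented-out lines deleted in the first hunk called jax.debug.inspect_array_sharding, a JAX debugging utility that reports an array's sharding from inside a jit-compiled function via a callback. A minimal sketch of that pattern, using a toy jitted function rather than anything from this repository:

import jax
import jax.numpy as jnp

@jax.jit
def attend(q):
    # Inside jit, the sharding is not known at trace time, so a plain
    # print of q.sharding would not work here; the callback fires once
    # the compiler has resolved the sharding for q.
    jax.debug.inspect_array_sharding(q, callback=lambda s: print(f"q_: {s}"))
    return q * 2.0

attend(jnp.ones((8, 128)))  # prints the sharding chosen for q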
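The line deleted in the second hunk was a bare Python print inside jit-compiled code, so it fired only while the function was being traced, not on every call. If run-time logging were still wanted, jax.debug.print is the jit-friendly equivalent; a minimal sketch with illustrative names, not code from the commit:

import jax
import jax.numpy as jnp

@jax.jit
def cast_like(reference, x):
    # jax.debug.print stages the print so it runs on each execution,
    # unlike a bare print, which runs only once at trace time.
    jax.debug.print("ref dtype: {}, x dtype: {}", reference.dtype, x.dtype)
    return x.astype(reference.dtype)

cast_like(jnp.ones(2, jnp.bfloat16), jnp.ones(2, jnp.float32))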
