Commit 7581798 ("hrm")
dlwh committed May 7, 2024 · 1 parent 570892c
Showing 1 changed file with 4 additions and 4 deletions.
src/levanter/models/attention.py
@@ -759,10 +759,10 @@ def wrap_flash_attention(q, k, v):
     q = q.astype(jnp.float32)
     k = k.astype(jnp.float32)
     v = v.astype(jnp.float32)
+    jax.debug.inspect_array_sharding(q, callback=lambda sharding: print(f"q: {sharding}"))
+    jax.debug.inspect_array_sharding(k, callback=lambda sharding: print(f"k: {sharding}"))
+    jax.debug.inspect_array_sharding(v, callback=lambda sharding: print(f"v: {sharding}"))
     print(q.dtype, k.dtype, v.dtype)
-    print(q.shape, k.shape, v.shape)
-    print(physical_axes_q, physical_axes_k, physical_axes_v)
-    print(q_class, k_class, v_class)
     out = jax.vmap(splash_kernel)(q, k, v, segment_ids=None)
     return out

@@ -789,7 +789,7 @@ def wrap_flash_attention(q, k, v):
     )
 
     attn_output = haliax.shard(attn_output)
-    jax.debug.inspect_array_sharding(attn_output.array, callback=lambda sharding: print(f"out: {sharding}"))
+    # jax.debug.inspect_array_sharding(attn_output.array, callback=lambda sharding: print(f"out: {sharding}"))
 
     return attn_output

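For context on the API this commit leans on: `jax.debug.inspect_array_sharding` registers a callback that fires with the `Sharding` JAX resolves for an array inside a `jit`-compiled function, which is what makes it more informative here than the plain `print` calls it replaces. Below is a minimal, self-contained sketch of that usage; the mesh, the axis name "data", and the function `f` are illustrative assumptions, not code from this repository:

```python
# Hypothetical sketch of jax.debug.inspect_array_sharding (not from this commit);
# the mesh, axis name "data", and function f are illustrative assumptions.
import jax
import jax.numpy as jnp
import numpy as np
from jax.sharding import Mesh, NamedSharding, PartitionSpec

# A 1-D device mesh over all available devices.
mesh = Mesh(np.array(jax.devices()), axis_names=("data",))


@jax.jit
def f(x):
    # The callback receives the Sharding resolved for x inside the compiled function.
    jax.debug.inspect_array_sharding(x, callback=lambda s: print(f"x: {s}"))
    return x * 2


# Shard rows of x across the "data" axis (assumes the row count divides evenly
# across the device count).
x = jax.device_put(jnp.ones((8, 4)), NamedSharding(mesh, PartitionSpec("data", None)))
f(x)
```

Unlike printing a traced value directly, the callback sees the post-partitioning layout, which is useful for checking that the inputs to a kernel such as `splash_kernel` are sharded as expected before it runs.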
