Skip to content

Commit

Permalink
Correct cmpxchg on arm64 in the case that the stlxr fails
Browse files Browse the repository at this point in the history
The critical section wasn't idempotent! If the stlxr failed, x9 would
have already been updated, so the next loop iteration would compare
against the wrong value, producing an incorrect result.
  • Loading branch information
tbodt committed Oct 27, 2024
1 parent fbe4a9f commit 67b61b5
Showing 1 changed file with 17 additions and 12 deletions.
29 changes: 17 additions & 12 deletions asbestos/gadgets-aarch64/misc.S
Original file line number Diff line number Diff line change
Expand Up @@ -90,29 +90,35 @@
.extern segfault_write

.gadget atomic_cmpxchg8b
#Test for alignment.
# Test for alignment.
tst _addr, 0x7
b.ne 2f
b.ne 3f

#cmpxchg8b via aligned exclusive 8b load
# cmpxchg8b via aligned exclusive 8b load
write_prep 64, atomic_cmpxchg8b

# load parameters: x9 = edx:eax (old value), x10 = ecx:ebx (new value)
mov w9, eax
bfi x9, xdx, 32, 32
mov w10, ebx
bfi x10, xcx, 32, 32

# run operation: load to x8, compare with x9, store x10. short circuit if comparison fails.
1:
ldaxr x8, [_xaddr]
cmp x9, x8
csel x9, x8, x9, ne
csel x8, x10, x8, eq
cset w11, eq
stlxr w12, x8, [_xaddr]
b.ne 1f
stlxr w12, x10, [_xaddr]
cbnz w12, 1b
1:
cset w11, eq

# edx:eax should always get set to the value last seen in memory (x8)
write_done 64, atomic_cmpxchg8b
ubfx xax, x9, 0, 32
ubfx xdx, x9, 32, 32
ubfx xax, x8, 0, 32
ubfx xdx, x8, 32, 32

# set flags (but only zf)
ldr w8, [_cpu, CPU_flags_res]
ldr w9, [_cpu, CPU_eflags]
and w8, w8, ~ZF_RES
Expand All @@ -122,7 +128,7 @@
gret 1
write_bullshit 64, atomic_cmpxchg8b

2: #All unaligned paths
3: # All unaligned paths
b segfault_write


Expand Down Expand Up @@ -290,5 +296,4 @@ do_helper 2
.gadget fstsw_ax
ldrh w10, [_cpu, CPU_fsw]
movs eax, w10, h
gret

gret

0 comments on commit 67b61b5

Please sign in to comment.