From f0043e3bca745f54783dbd2f375e55fe9c1d5db6 Mon Sep 17 00:00:00 2001
From: Tei Im <tei.im@testinprod.io>
Date: Sun, 12 Jan 2025 21:27:37 -0800
Subject: [PATCH] Rename toU64(), toU256() for consistency with other functions
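
Rename the uint8-taking conversion helpers toU64() and toU256() to
byteToU64() and byteToU256(), so that every conversion helper is named
sourceToTarget, matching shortToU64(), shortToU256(), u64ToU256() and
u256ToU64().

A minimal sketch of the resulting helper family (the byteToU64,
byteToU256 and shortToU64 bodies are copied from rvgo/fast/yul64.go as
changed here; the standalone package and main are illustrative only):

    package main

    import (
        "fmt"

        "github.com/holiman/uint256"
    )

    type U64 = uint64
    type U256 = uint256.Int

    // byteToU64 widens a uint8 constant to U64 (previously toU64).
    func byteToU64(v uint8) U64 { return uint64(v) }

    // shortToU64 widens a uint16 constant to U64 (unchanged).
    func shortToU64(v uint16) U64 { return uint64(v) }

    // byteToU256 widens a uint8 constant to U256 (previously toU256).
    func byteToU256(v uint8) U256 { return *uint256.NewInt(uint64(v)) }

    func main() {
        mask := byteToU64(0x7F)
        wide := byteToU256(3)
        fmt.Printf("mask=%#x wide=%d short=%d\n", mask, wide.Uint64(), shortToU64(0x3FF))
    }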

---
 rvgo/fast/parse.go |  42 ++---
 rvgo/fast/vm.go    | 298 ++++++++++++++++-----------------
 rvgo/fast/yul64.go |   8 +-
 rvgo/slow/parse.go |  42 ++---
 rvgo/slow/vm.go    | 400 ++++++++++++++++++++++-----------------------
 rvgo/slow/yul64.go |  22 +--
 6 files changed, 406 insertions(+), 406 deletions(-)
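
As a sanity check, a self-contained sketch of how a call site reads after
the rename, using the parse helpers from rvgo/fast/parse.go on a known
instruction word (0x00150513, i.e. addi a0, a0, 1). The standalone main,
the local helper copies, and the simplified signExtend64 body are
illustrative only:

    package main

    import "fmt"

    type U64 = uint64

    // Local copies of the yul64 helpers, using the names introduced by this patch.
    func byteToU64(v uint8) U64  { return uint64(v) }
    func shr64(shift, v U64) U64 { return v >> shift }
    func and64(a, b U64) U64     { return a & b }

    // signExtend64 treats bit index `bit` as the sign bit (simplified sketch).
    func signExtend64(v, bit U64) U64 {
        if and64(shr64(bit, v), 1) == 0 {
            return v & ((U64(1) << (bit + 1)) - 1) // clear everything above the sign bit
        }
        return v | (^U64(0) << bit) // fill everything above the sign bit with ones
    }

    // The parse helpers exactly as they read after this rename.
    func parseOpcode(instr U64) U64   { return and64(instr, byteToU64(0x7F)) }
    func parseRd(instr U64) U64       { return and64(shr64(byteToU64(7), instr), byteToU64(0x1F)) }
    func parseRs1(instr U64) U64      { return and64(shr64(byteToU64(15), instr), byteToU64(0x1F)) }
    func parseImmTypeI(instr U64) U64 { return signExtend64(shr64(byteToU64(20), instr), byteToU64(11)) }

    func main() {
        instr := U64(0x00150513) // addi a0, a0, 1
        fmt.Printf("opcode=%#x rd=%d rs1=%d imm=%d\n",
            parseOpcode(instr), parseRd(instr), parseRs1(instr), parseImmTypeI(instr))
        // prints: opcode=0x13 rd=10 rs1=10 imm=1
    }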

diff --git a/rvgo/fast/parse.go b/rvgo/fast/parse.go
index ae32aa68..8b5ea947 100644
--- a/rvgo/fast/parse.go
+++ b/rvgo/fast/parse.go
@@ -4,74 +4,74 @@ package fast
 // These should 1:1 match with the same definitions in the slow package.
 
 func parseImmTypeI(instr U64) U64 {
-	return signExtend64(shr64(toU64(20), instr), toU64(11))
+	return signExtend64(shr64(byteToU64(20), instr), byteToU64(11))
 }
 
 func parseImmTypeS(instr U64) U64 {
 	return signExtend64(
 		or64(
-			shl64(toU64(5), shr64(toU64(25), instr)),
-			and64(shr64(toU64(7), instr), toU64(0x1F)),
+			shl64(byteToU64(5), shr64(byteToU64(25), instr)),
+			and64(shr64(byteToU64(7), instr), byteToU64(0x1F)),
 		),
-		toU64(11))
+		byteToU64(11))
 }
 
 func parseImmTypeB(instr U64) U64 {
 	return signExtend64(
 		or64(
 			or64(
-				shl64(toU64(1), and64(shr64(toU64(8), instr), toU64(0xF))),
-				shl64(toU64(5), and64(shr64(toU64(25), instr), toU64(0x3F))),
+				shl64(byteToU64(1), and64(shr64(byteToU64(8), instr), byteToU64(0xF))),
+				shl64(byteToU64(5), and64(shr64(byteToU64(25), instr), byteToU64(0x3F))),
 			),
 			or64(
-				shl64(toU64(11), and64(shr64(toU64(7), instr), toU64(1))),
-				shl64(toU64(12), shr64(toU64(31), instr)),
+				shl64(byteToU64(11), and64(shr64(byteToU64(7), instr), byteToU64(1))),
+				shl64(byteToU64(12), shr64(byteToU64(31), instr)),
 			),
 		),
-		toU64(12),
+		byteToU64(12),
 	)
 }
 
 func parseImmTypeU(instr U64) U64 {
-	return signExtend64(shr64(toU64(12), instr), toU64(19))
+	return signExtend64(shr64(byteToU64(12), instr), byteToU64(19))
 }
 
 func parseImmTypeJ(instr U64) U64 {
 	return signExtend64(
 		or64(
 			or64(
-				and64(shr64(toU64(21), instr), shortToU64(0x3FF)),          // 10 bits for index 0:9
-				shl64(toU64(10), and64(shr64(toU64(20), instr), toU64(1))), // 1 bit for index 10
+				and64(shr64(byteToU64(21), instr), shortToU64(0x3FF)),                  // 10 bits for index 0:9
+				shl64(byteToU64(10), and64(shr64(byteToU64(20), instr), byteToU64(1))), // 1 bit for index 10
 			),
 			or64(
-				shl64(toU64(11), and64(shr64(toU64(12), instr), toU64(0xFF))), // 8 bits for index 11:18
-				shl64(toU64(19), shr64(toU64(31), instr)),                     // 1 bit for index 19
+				shl64(byteToU64(11), and64(shr64(byteToU64(12), instr), byteToU64(0xFF))), // 8 bits for index 11:18
+				shl64(byteToU64(19), shr64(byteToU64(31), instr)),                         // 1 bit for index 19
 			),
 		),
-		toU64(19),
+		byteToU64(19),
 	)
 }
 
 func parseOpcode(instr U64) U64 {
-	return and64(instr, toU64(0x7F))
+	return and64(instr, byteToU64(0x7F))
 }
 
 func parseRd(instr U64) U64 {
-	return and64(shr64(toU64(7), instr), toU64(0x1F))
+	return and64(shr64(byteToU64(7), instr), byteToU64(0x1F))
 }
 
 func parseFunct3(instr U64) U64 {
-	return and64(shr64(toU64(12), instr), toU64(0x7))
+	return and64(shr64(byteToU64(12), instr), byteToU64(0x7))
 }
 
 func parseRs1(instr U64) U64 {
-	return and64(shr64(toU64(15), instr), toU64(0x1F))
+	return and64(shr64(byteToU64(15), instr), byteToU64(0x1F))
 }
 
 func parseRs2(instr U64) U64 {
-	return and64(shr64(toU64(20), instr), toU64(0x1F))
+	return and64(shr64(byteToU64(20), instr), byteToU64(0x1F))
 }
 
 func parseFunct7(instr U64) U64 {
-	return shr64(toU64(25), instr)
+	return shr64(byteToU64(25), instr)
 }
diff --git a/rvgo/fast/vm.go b/rvgo/fast/vm.go
index 98cdc767..8da8cb08 100644
--- a/rvgo/fast/vm.go
+++ b/rvgo/fast/vm.go
@@ -271,19 +271,19 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 	//
 	writePreimageKey := func(addr U64, count U64) U64 {
 		// adjust count down, so we only have to read a single 32 byte leaf of memory
-		alignment := and64(addr, toU64(31))
-		maxData := sub64(toU64(32), alignment)
+		alignment := and64(addr, byteToU64(31))
+		maxData := sub64(byteToU64(32), alignment)
 		if gt64(count, maxData) != 0 {
 			count = maxData
 		}
 
 		dat := b32asBEWord(getMemoryB32(sub64(addr, alignment), 1))
 		// shift out leading bits
-		dat = shl(u64ToU256(shl64(toU64(3), alignment)), dat)
+		dat = shl(u64ToU256(shl64(byteToU64(3), alignment)), dat)
 		// shift to right end, remove trailing bits
-		dat = shr(u64ToU256(shl64(toU64(3), sub64(toU64(32), count))), dat)
+		dat = shr(u64ToU256(shl64(byteToU64(3), sub64(byteToU64(32), count))), dat)
 
-		bits := shl(toU256(3), u64ToU256(count))
+		bits := shl(byteToU256(3), u64ToU256(count))
 
 		preImageKey := getPreimageKey()
 
@@ -294,7 +294,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 
 		// We reset the pre-image value offset back to 0 (the right part of the merkle pair)
 		setPreimageKey(beWordAsB32(key))
-		setPreimageOffset(toU64(0))
+		setPreimageOffset(byteToU64(0))
 		return count
 	}
 
@@ -307,10 +307,10 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 			revertWithCode(riscv.ErrFailToReadPreimage, err)
 		}
 		if iszero64(pdatlen) { // EOF
-			return toU64(0)
+			return byteToU64(0)
 		}
-		alignment := and64(addr, toU64(31))    // how many bytes addr is offset from being left-aligned
-		maxData := sub64(toU64(32), alignment) // higher alignment leaves less room for data this step
+		alignment := and64(addr, byteToU64(31))    // how many bytes addr is offset from being left-aligned
+		maxData := sub64(byteToU64(32), alignment) // higher alignment leaves less room for data this step
 		if gt64(count, maxData) != 0 {
 			count = maxData
 		}
@@ -318,9 +318,9 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 			count = pdatlen
 		}
 
-		bits := shl64(toU64(3), sub64(toU64(32), count))             // 32-count, in bits
-		mask := not(sub(shl(u64ToU256(bits), toU256(1)), toU256(1))) // left-aligned mask for count bytes
-		alignmentBits := u64ToU256(shl64(toU64(3), alignment))
+		bits := shl64(byteToU64(3), sub64(byteToU64(32), count))             // 32-count, in bits
+		mask := not(sub(shl(u64ToU256(bits), byteToU256(1)), byteToU256(1))) // left-aligned mask for count bytes
+		alignmentBits := u64ToU256(shl64(byteToU64(3), alignment))
 		mask = shr(alignmentBits, mask)                  // mask of count bytes, shifted by alignment
 		pdat := shr(alignmentBits, b32asBEWord(pdatB32)) // pdat, shifted by alignment
 
@@ -339,42 +339,42 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 	// Syscall handling
 	//
 	sysCall := func() {
-		a7 := getRegister(toU64(17))
+		a7 := getRegister(byteToU64(17))
 		switch a7 {
 		case riscv.SysExit: // exit the calling thread. No multi-thread support yet, so just exit.
-			a0 := getRegister(toU64(10))
+			a0 := getRegister(byteToU64(10))
 			setExitCode(uint8(a0))
 			setExited()
 			// program stops here, no need to change registers.
 		case riscv.SysExitGroup: // exit-group
-			a0 := getRegister(toU64(10))
+			a0 := getRegister(byteToU64(10))
 			setExitCode(uint8(a0))
 			setExited()
 		case riscv.SysBrk: // brk
 			// Go sys_linux_riscv64 runtime will only ever call brk(NULL), i.e. first argument (register a0) set to 0.
 
 			// brk(0) changes nothing about the memory, and returns the current page break
-			v := shl64(toU64(30), toU64(1)) // set program break at 1 GiB
-			setRegister(toU64(10), v)
-			setRegister(toU64(11), toU64(0)) // no error
+			v := shl64(byteToU64(30), byteToU64(1)) // set program break at 1 GiB
+			setRegister(byteToU64(10), v)
+			setRegister(byteToU64(11), byteToU64(0)) // no error
 		case riscv.SysMmap: // mmap
 			// A0 = addr (hint)
-			addr := getRegister(toU64(10))
+			addr := getRegister(byteToU64(10))
 			// A1 = n (length)
-			length := getRegister(toU64(11))
+			length := getRegister(byteToU64(11))
 			// A2 = prot (memory protection type, can ignore)
 			// A3 = flags (shared with other process and or written back to file)
-			flags := getRegister(toU64(13))
+			flags := getRegister(byteToU64(13))
 			// A4 = fd (file descriptor, can ignore because we support anon memory only)
-			fd := getRegister(toU64(14))
+			fd := getRegister(byteToU64(14))
 			// A5 = offset (offset in file, we don't support any non-anon memory, so we can ignore this)
 
-			errCode := toU64(0)
+			errCode := byteToU64(0)
 
 			// ensure MAP_ANONYMOUS is set and fd == -1
 			if (flags&0x20) == 0 || fd != u64Mask() {
 				addr = u64Mask()
-				errCode = toU64(0x4d) // EBADF
+				errCode = byteToU64(0x4d) // EBADF
 			} else {
 				// ignore: prot, flags, fd, offset
 				switch addr {
@@ -394,35 +394,35 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 					//fmt.Printf("mmap: 0x%016x (0x%x allowed)\n", addr, length)
 				}
 			}
-			setRegister(toU64(10), addr)
-			setRegister(toU64(11), errCode)
+			setRegister(byteToU64(10), addr)
+			setRegister(byteToU64(11), errCode)
 		case riscv.SysRead: // read
-			fd := getRegister(toU64(10))    // A0 = fd
-			addr := getRegister(toU64(11))  // A1 = *buf addr
-			count := getRegister(toU64(12)) // A2 = count
+			fd := getRegister(byteToU64(10))    // A0 = fd
+			addr := getRegister(byteToU64(11))  // A1 = *buf addr
+			count := getRegister(byteToU64(12)) // A2 = count
 			var n U64
 			var errCode U64
 			switch fd {
 			case riscv.FdStdin: // stdin
-				n = toU64(0) // never read anything from stdin
-				errCode = toU64(0)
+				n = byteToU64(0) // never read anything from stdin
+				errCode = byteToU64(0)
 			case riscv.FdHintRead: // hint-read
 				// say we read it all, to continue execution after reading the hint-write ack response
 				n = count
-				errCode = toU64(0)
+				errCode = byteToU64(0)
 			case riscv.FdPreimageRead: // preimage read
 				n = readPreimageValue(addr, count)
-				errCode = toU64(0)
+				errCode = byteToU64(0)
 			default:
-				n = u64Mask()         //  -1 (reading error)
-				errCode = toU64(0x4d) // EBADF
+				n = u64Mask()             //  -1 (reading error)
+				errCode = byteToU64(0x4d) // EBADF
 			}
-			setRegister(toU64(10), n)
-			setRegister(toU64(11), errCode)
+			setRegister(byteToU64(10), n)
+			setRegister(byteToU64(11), errCode)
 		case riscv.SysWrite: // write
-			fd := getRegister(toU64(10))    // A0 = fd
-			addr := getRegister(toU64(11))  // A1 = *buf addr
-			count := getRegister(toU64(12)) // A2 = count
+			fd := getRegister(byteToU64(10))    // A0 = fd
+			addr := getRegister(byteToU64(11))  // A1 = *buf addr
+			count := getRegister(byteToU64(12)) // A2 = count
 			var n U64
 			var errCode U64
 			switch fd {
@@ -432,14 +432,14 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 					panic(fmt.Errorf("stdout writing err: %w", err))
 				}
 				n = count // write completes fully in single instruction step
-				errCode = toU64(0)
+				errCode = byteToU64(0)
 			case riscv.FdStderr: // stderr
 				_, err := io.Copy(inst.stdErr, s.Memory.ReadMemoryRange(addr, count))
 				if err != nil {
 					panic(fmt.Errorf("stderr writing err: %w", err))
 				}
 				n = count // write completes fully in single instruction step
-				errCode = toU64(0)
+				errCode = byteToU64(0)
 			case riscv.FdHintWrite: // hint-write
 				hintData, _ := io.ReadAll(s.Memory.ReadMemoryRange(addr, count))
 				s.LastHint = append(inst.state.LastHint, hintData...)
@@ -454,91 +454,91 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 					}
 				}
 				n = count
-				errCode = toU64(0)
+				errCode = byteToU64(0)
 			case riscv.FdPreimageWrite: // pre-image key write
 				n = writePreimageKey(addr, count)
-				errCode = toU64(0) // no error
+				errCode = byteToU64(0) // no error
 			default: // any other file, including (3) hint read (5) preimage read
-				n = u64Mask()         //  -1 (writing error)
-				errCode = toU64(0x4d) // EBADF
+				n = u64Mask()             //  -1 (writing error)
+				errCode = byteToU64(0x4d) // EBADF
 			}
-			setRegister(toU64(10), n)
-			setRegister(toU64(11), errCode)
+			setRegister(byteToU64(10), n)
+			setRegister(byteToU64(11), errCode)
 		case riscv.SysFcntl: // fcntl - file descriptor manipulation / info lookup
-			fd := getRegister(toU64(10))  // A0 = fd
-			cmd := getRegister(toU64(11)) // A1 = cmd
+			fd := getRegister(byteToU64(10))  // A0 = fd
+			cmd := getRegister(byteToU64(11)) // A1 = cmd
 			var out U64
 			var errCode U64
 			switch cmd {
 			case 0x1: // F_GETFD: get file descriptor flags
 				switch fd {
 				case 0: // stdin
-					out = toU64(0) // no flag set
+					out = byteToU64(0) // no flag set
 				case 1: // stdout
-					out = toU64(0) // no flag set
+					out = byteToU64(0) // no flag set
 				case 2: // stderr
-					out = toU64(0) // no flag set
+					out = byteToU64(0) // no flag set
 				case 3: // hint-read
-					out = toU64(0) // no flag set
+					out = byteToU64(0) // no flag set
 				case 4: // hint-write
-					out = toU64(0) // no flag set
+					out = byteToU64(0) // no flag set
 				case 5: // pre-image read
-					out = toU64(0) // no flag set
+					out = byteToU64(0) // no flag set
 				case 6: // pre-image write
-					out = toU64(0) // no flag set
+					out = byteToU64(0) // no flag set
 				default:
 					out = u64Mask()
-					errCode = toU64(0x4d) //EBADF
+					errCode = byteToU64(0x4d) //EBADF
 				}
 			case 0x3: // F_GETFL: get file descriptor flags
 				switch fd {
 				case 0: // stdin
-					out = toU64(0) // O_RDONLY
+					out = byteToU64(0) // O_RDONLY
 				case 1: // stdout
-					out = toU64(1) // O_WRONLY
+					out = byteToU64(1) // O_WRONLY
 				case 2: // stderr
-					out = toU64(1) // O_WRONLY
+					out = byteToU64(1) // O_WRONLY
 				case 3: // hint-read
-					out = toU64(0) // O_RDONLY
+					out = byteToU64(0) // O_RDONLY
 				case 4: // hint-write
-					out = toU64(1) // O_WRONLY
+					out = byteToU64(1) // O_WRONLY
 				case 5: // pre-image read
-					out = toU64(0) // O_RDONLY
+					out = byteToU64(0) // O_RDONLY
 				case 6: // pre-image write
-					out = toU64(1) // O_WRONLY
+					out = byteToU64(1) // O_WRONLY
 				default:
 					out = u64Mask()
-					errCode = toU64(0x4d) // EBADF
+					errCode = byteToU64(0x4d) // EBADF
 				}
 			default: // no other commands: don't allow changing flags, duplicating FDs, etc.
 				out = u64Mask()
-				errCode = toU64(0x16) // EINVAL (cmd not recognized by this kernel)
+				errCode = byteToU64(0x16) // EINVAL (cmd not recognized by this kernel)
 			}
-			setRegister(toU64(10), out)
-			setRegister(toU64(11), errCode) // EBADF
+			setRegister(byteToU64(10), out)
+			setRegister(byteToU64(11), errCode) // EBADF
 		case riscv.SysOpenat: // openat - the Go linux runtime will try to open optional /sys/kernel files for performance hints
-			setRegister(toU64(10), u64Mask())
-			setRegister(toU64(11), toU64(0xd)) // EACCES - no access allowed
+			setRegister(byteToU64(10), u64Mask())
+			setRegister(byteToU64(11), byteToU64(0xd)) // EACCES - no access allowed
 		case riscv.SysClockGettime: // clock_gettime
-			addr := getRegister(toU64(11)) // addr of timespec struct
+			addr := getRegister(byteToU64(11)) // addr of timespec struct
 			// write 1337s + 42ns as time
-			value := or(shortToU256(1337), shl(shortToU256(64), toU256(42)))
-			storeMemUnaligned(addr, toU64(16), value, 1, 2, true, true)
-			setRegister(toU64(10), toU64(0))
-			setRegister(toU64(11), toU64(0))
+			value := or(shortToU256(1337), shl(shortToU256(64), byteToU256(42)))
+			storeMemUnaligned(addr, byteToU64(16), value, 1, 2, true, true)
+			setRegister(byteToU64(10), byteToU64(0))
+			setRegister(byteToU64(11), byteToU64(0))
 		case riscv.SysClone: // clone - not supported
-			setRegister(toU64(10), toU64(1))
-			setRegister(toU64(11), toU64(0))
+			setRegister(byteToU64(10), byteToU64(1))
+			setRegister(byteToU64(11), byteToU64(0))
 		case riscv.SysGetrlimit: // getrlimit
-			res := getRegister(toU64(10))
-			addr := getRegister(toU64(11))
+			res := getRegister(byteToU64(10))
+			addr := getRegister(byteToU64(11))
 			switch res {
 			case 0x7: // RLIMIT_NOFILE
 				// first 8 bytes: soft limit. 1024 file handles max open
 				// second 8 bytes: hard limit
-				storeMemUnaligned(addr, toU64(16), or(shortToU256(1024), shl(toU256(64), shortToU256(1024))), 1, 2, true, true)
-				setRegister(toU64(10), toU64(0))
-				setRegister(toU64(11), toU64(0))
+				storeMemUnaligned(addr, byteToU64(16), or(shortToU256(1024), shl(byteToU256(64), shortToU256(1024))), 1, 2, true, true)
+				setRegister(byteToU64(10), byteToU64(0))
+				setRegister(byteToU64(11), byteToU64(0))
 			default:
 				revertWithCode(riscv.ErrUnrecognizedResource, &UnrecognizedResourceErr{Resource: res})
 			}
@@ -550,8 +550,8 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 			revertWithCode(riscv.ErrInvalidSyscall, &UnsupportedSyscallErr{SyscallNum: a7})
 		default:
 			// Ignore(no-op) unsupported system calls
-			setRegister(toU64(10), toU64(0))
-			setRegister(toU64(11), toU64(0))
+			setRegister(byteToU64(10), byteToU64(0))
+			setRegister(byteToU64(11), byteToU64(0))
 			// List of ignored(no-op) syscalls used by op-program:
 			// sched_getaffinity - hardcode to indicate affinity with any cpu-set mask
 			// sched_yield - nothing to yield, synchronous execution only, for now
@@ -571,10 +571,10 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 	if getExited() { // early exit if we can
 		return nil
 	}
-	setStep(add64(getStep(), toU64(1)))
+	setStep(add64(getStep(), byteToU64(1)))
 
 	pc := getPC()
-	instr := loadMem(pc, toU64(4), false, 0, 0xff) // raw instruction
+	instr := loadMem(pc, byteToU64(4), false, 0, 0xff) // raw instruction
 
 	// these fields are ignored if not applicable to the instruction type / opcode
 	opcode := parseOpcode(instr)
@@ -589,48 +589,48 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 		// LB, LH, LW, LD, LBU, LHU, LWU
 
 		// bits[14:12] set to 111 are reserved
-		if eq64(funct3, toU64(0x7)) != 0 {
+		if eq64(funct3, byteToU64(0x7)) != 0 {
 			revertWithCode(riscv.ErrInvalidSyscall, fmt.Errorf("illegal instruction %d: reserved instruction encoding", instr))
 		}
 
 		imm := parseImmTypeI(instr)
-		signed := iszero64(and64(funct3, toU64(4)))      // 4 = 100 -> bitflag
-		size := shl64(and64(funct3, toU64(3)), toU64(1)) // 3 = 11 -> 1, 2, 4, 8 bytes size
+		signed := iszero64(and64(funct3, byteToU64(4)))          // 4 = 100 -> bitflag
+		size := shl64(and64(funct3, byteToU64(3)), byteToU64(1)) // 3 = 11 -> 1, 2, 4, 8 bytes size
 		rs1Value := getRegister(rs1)
-		memIndex := add64(rs1Value, signExtend64(imm, toU64(11)))
+		memIndex := add64(rs1Value, signExtend64(imm, byteToU64(11)))
 		rdValue := loadMem(memIndex, size, signed, 1, 2)
 		setRegister(rd, rdValue)
-		setPC(add64(pc, toU64(4)))
+		setPC(add64(pc, byteToU64(4)))
 	case 0x23: // 010_0011: memory storing
 		// SB, SH, SW, SD
 		imm := parseImmTypeS(instr)
-		size := shl64(funct3, toU64(1))
+		size := shl64(funct3, byteToU64(1))
 		value := getRegister(rs2)
 		rs1Value := getRegister(rs1)
-		memIndex := add64(rs1Value, signExtend64(imm, toU64(11)))
+		memIndex := add64(rs1Value, signExtend64(imm, byteToU64(11)))
 		storeMem(memIndex, size, value, 1, 2, true, true)
-		setPC(add64(pc, toU64(4)))
+		setPC(add64(pc, byteToU64(4)))
 	case 0x63: // 110_0011: branching
 		rs1Value := getRegister(rs1)
 		rs2Value := getRegister(rs2)
-		branchHit := toU64(0)
+		branchHit := byteToU64(0)
 		switch funct3 {
 		case 0: // 000 = BEQ
 			branchHit = eq64(rs1Value, rs2Value)
 		case 1: // 001 = BNE
-			branchHit = and64(not64(eq64(rs1Value, rs2Value)), toU64(1))
+			branchHit = and64(not64(eq64(rs1Value, rs2Value)), byteToU64(1))
 		case 4: // 100 = BLT
 			branchHit = slt64(rs1Value, rs2Value)
 		case 5: // 101 = BGE
-			branchHit = and64(not64(slt64(rs1Value, rs2Value)), toU64(1))
+			branchHit = and64(not64(slt64(rs1Value, rs2Value)), byteToU64(1))
 		case 6: // 110 = BLTU
 			branchHit = lt64(rs1Value, rs2Value)
 		case 7: // 111 = BGEU
-			branchHit = and64(not64(lt64(rs1Value, rs2Value)), toU64(1))
+			branchHit = and64(not64(lt64(rs1Value, rs2Value)), byteToU64(1))
 		}
 		switch branchHit {
 		case 0:
-			pc = add64(pc, toU64(4))
+			pc = add64(pc, byteToU64(4))
 		default:
 			imm := parseImmTypeB(instr)
 			// imm is a signed offset, in multiples of 2 bytes.
@@ -647,7 +647,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 		case 0: // 000 = ADDI
 			rdValue = add64(rs1Value, imm)
 		case 1: // 001 = SLLI
-			rdValue = shl64(and64(imm, toU64(0x3F)), rs1Value) // lower 6 bits in 64 bit mode
+			rdValue = shl64(and64(imm, byteToU64(0x3F)), rs1Value) // lower 6 bits in 64 bit mode
 		case 2: // 010 = SLTI
 			rdValue = slt64(rs1Value, imm)
 		case 3: // 011 = SLTIU
@@ -655,11 +655,11 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 		case 4: // 100 = XORI
 			rdValue = xor64(rs1Value, imm)
 		case 5: // 101 = SR~
-			switch shr64(toU64(6), imm) { // in rv64i the top 6 bits select the shift type
+			switch shr64(byteToU64(6), imm) { // in rv64i the top 6 bits select the shift type
 			case 0x00: // 000000 = SRLI
-				rdValue = shr64(and64(imm, toU64(0x3F)), rs1Value) // lower 6 bits in 64 bit mode
+				rdValue = shr64(and64(imm, byteToU64(0x3F)), rs1Value) // lower 6 bits in 64 bit mode
 			case 0x10: // 010000 = SRAI
-				rdValue = sar64(and64(imm, toU64(0x3F)), rs1Value) // lower 6 bits in 64 bit mode
+				rdValue = sar64(and64(imm, byteToU64(0x3F)), rs1Value) // lower 6 bits in 64 bit mode
 			}
 		case 6: // 110 = ORI
 			rdValue = or64(rs1Value, imm)
@@ -667,7 +667,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 			rdValue = and64(rs1Value, imm)
 		}
 		setRegister(rd, rdValue)
-		setPC(add64(pc, toU64(4)))
+		setPC(add64(pc, byteToU64(4)))
 	case 0x1B: // 001_1011: immediate arithmetic and logic signed 32 bit
 		rs1Value := getRegister(rs1)
 		imm := parseImmTypeI(instr)
@@ -676,18 +676,18 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 		case 0: // 000 = ADDIW
 			rdValue = mask32Signed64(add64(rs1Value, imm))
 		case 1: // 001 = SLLIW
-			rdValue = mask32Signed64(shl64(and64(imm, toU64(0x1F)), rs1Value))
+			rdValue = mask32Signed64(shl64(and64(imm, byteToU64(0x1F)), rs1Value))
 		case 5: // 101 = SR~
-			shamt := and64(imm, toU64(0x1F))
-			switch shr64(toU64(5), imm) { // top 7 bits select the shift type
+			shamt := and64(imm, byteToU64(0x1F))
+			switch shr64(byteToU64(5), imm) { // top 7 bits select the shift type
 			case 0x00: // 0000000 = SRLIW
-				rdValue = signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), toU64(31))
+				rdValue = signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), byteToU64(31))
 			case 0x20: // 0100000 = SRAIW
-				rdValue = signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), sub64(toU64(31), shamt))
+				rdValue = signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), sub64(byteToU64(31), shamt))
 			}
 		}
 		setRegister(rd, rdValue)
-		setPC(add64(pc, toU64(4)))
+		setPC(add64(pc, byteToU64(4)))
 	case 0x33: // 011_0011: register arithmetic and logic
 		rs1Value := getRegister(rs1)
 		rs2Value := getRegister(rs2)
@@ -698,11 +698,11 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 			case 0: // 000 = MUL: signed x signed
 				rdValue = mul64(rs1Value, rs2Value)
 			case 1: // 001 = MULH: upper bits of signed x signed
-				rdValue = u256ToU64(shr(toU256(64), mul(signExtend64To256(rs1Value), signExtend64To256(rs2Value))))
+				rdValue = u256ToU64(shr(byteToU256(64), mul(signExtend64To256(rs1Value), signExtend64To256(rs2Value))))
 			case 2: // 010 = MULHSU: upper bits of signed x unsigned
-				rdValue = u256ToU64(shr(toU256(64), mul(signExtend64To256(rs1Value), u64ToU256(rs2Value))))
+				rdValue = u256ToU64(shr(byteToU256(64), mul(signExtend64To256(rs1Value), u64ToU256(rs2Value))))
 			case 3: // 011 = MULHU: upper bits of unsigned x unsigned
-				rdValue = u256ToU64(shr(toU256(64), mul(u64ToU256(rs1Value), u64ToU256(rs2Value))))
+				rdValue = u256ToU64(shr(byteToU256(64), mul(u64ToU256(rs1Value), u64ToU256(rs2Value))))
 			case 4: // 100 = DIV
 				switch rs2Value {
 				case 0:
@@ -742,7 +742,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 					rdValue = sub64(rs1Value, rs2Value)
 				}
 			case 1: // 001 = SLL
-				rdValue = shl64(and64(rs2Value, toU64(0x3F)), rs1Value) // only the low 6 bits are consider in RV6VI
+				rdValue = shl64(and64(rs2Value, byteToU64(0x3F)), rs1Value) // only the low 6 bits are considered in RV64I
 			case 2: // 010 = SLT
 				rdValue = slt64(rs1Value, rs2Value)
 			case 3: // 011 = SLTU
@@ -752,9 +752,9 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 			case 5: // 101 = SR~
 				switch funct7 {
 				case 0x00: // 0000000 = SRL
-					rdValue = shr64(and64(rs2Value, toU64(0x3F)), rs1Value) // logical: fill with zeroes
+					rdValue = shr64(and64(rs2Value, byteToU64(0x3F)), rs1Value) // logical: fill with zeroes
 				case 0x20: // 0100000 = SRA
-					rdValue = sar64(and64(rs2Value, toU64(0x3F)), rs1Value) // arithmetic: sign bit is extended
+					rdValue = sar64(and64(rs2Value, byteToU64(0x3F)), rs1Value) // arithmetic: sign bit is extended
 				}
 			case 6: // 110 = OR
 				rdValue = or64(rs1Value, rs2Value)
@@ -763,7 +763,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 			}
 		}
 		setRegister(rd, rdValue)
-		setPC(add64(pc, toU64(4)))
+		setPC(add64(pc, byteToU64(4)))
 	case 0x3B: // 011_1011: register arithmetic and logic in 32 bits
 		rs1Value := getRegister(rs1)
 		rs2Value := getRegister(rs2)
@@ -812,35 +812,35 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 					rdValue = mask32Signed64(sub64(and64(rs1Value, u32Mask()), and64(rs2Value, u32Mask())))
 				}
 			case 1: // 001 = SLLW
-				rdValue = mask32Signed64(shl64(and64(rs2Value, toU64(0x1F)), rs1Value))
+				rdValue = mask32Signed64(shl64(and64(rs2Value, byteToU64(0x1F)), rs1Value))
 			case 5: // 101 = SR~
-				shamt := and64(rs2Value, toU64(0x1F))
+				shamt := and64(rs2Value, byteToU64(0x1F))
 				switch funct7 {
 				case 0x00: // 0000000 = SRLW
-					rdValue = signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), toU64(31))
+					rdValue = signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), byteToU64(31))
 				case 0x20: // 0100000 = SRAW
-					rdValue = signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), sub64(toU64(31), shamt))
+					rdValue = signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), sub64(byteToU64(31), shamt))
 				}
 			}
 		}
 		setRegister(rd, rdValue)
-		setPC(add64(pc, toU64(4)))
+		setPC(add64(pc, byteToU64(4)))
 	case 0x37: // 011_0111: LUI = Load upper immediate
 		imm := parseImmTypeU(instr)
-		rdValue := shl64(toU64(12), imm)
+		rdValue := shl64(byteToU64(12), imm)
 		setRegister(rd, rdValue)
-		setPC(add64(pc, toU64(4)))
+		setPC(add64(pc, byteToU64(4)))
 	case 0x17: // 001_0111: AUIPC = Add upper immediate to PC
 		imm := parseImmTypeU(instr)
-		rdValue := add64(pc, signExtend64(shl64(toU64(12), imm), toU64(31)))
+		rdValue := add64(pc, signExtend64(shl64(byteToU64(12), imm), byteToU64(31)))
 		setRegister(rd, rdValue)
-		setPC(add64(pc, toU64(4)))
+		setPC(add64(pc, byteToU64(4)))
 	case 0x6F: // 110_1111: JAL = Jump and link
 		imm := parseImmTypeJ(instr)
-		rdValue := add64(pc, toU64(4))
+		rdValue := add64(pc, byteToU64(4))
 		setRegister(rd, rdValue)
 
-		newPC := add64(pc, signExtend64(shl64(toU64(1), imm), toU64(20)))
+		newPC := add64(pc, signExtend64(shl64(byteToU64(1), imm), byteToU64(20)))
 		if newPC&3 != 0 { // quick target alignment check
 			revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("pc %d not aligned with 4 bytes", newPC))
 		}
@@ -848,10 +848,10 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 	case 0x67: // 110_0111: JALR = Jump and link register
 		rs1Value := getRegister(rs1)
 		imm := parseImmTypeI(instr)
-		rdValue := add64(pc, toU64(4))
+		rdValue := add64(pc, byteToU64(4))
 		setRegister(rd, rdValue)
 
-		newPC := and64(add64(rs1Value, signExtend64(imm, toU64(11))), xor64(u64Mask(), toU64(1)))
+		newPC := and64(add64(rs1Value, signExtend64(imm, byteToU64(11))), xor64(u64Mask(), byteToU64(1)))
 		if newPC&3 != 0 { // quick addr alignment check
 			revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("pc %d not aligned with 4 bytes", newPC))
 		}
@@ -859,21 +859,21 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 	case 0x73: // 111_0011: environment things
 		switch funct3 {
 		case 0: // 000 = ECALL/EBREAK
-			switch shr64(toU64(20), instr) { // I-type, top 12 bits
+			switch shr64(byteToU64(20), instr) { // I-type, top 12 bits
 			case 0: // imm12 = 000000000000 ECALL
 				sysCall()
-				setPC(add64(pc, toU64(4)))
+				setPC(add64(pc, byteToU64(4)))
 			default: // imm12 = 000000000001 EBREAK
-				setPC(add64(pc, toU64(4))) // ignore breakpoint
+				setPC(add64(pc, byteToU64(4))) // ignore breakpoint
 			}
 		default: // CSR instructions
 			setRegister(rd, 0) // ignore CSR instructions
-			setPC(add64(pc, toU64(4)))
+			setPC(add64(pc, byteToU64(4)))
 		}
 	case 0x2F: // 010_1111: RV32A and RV32A atomic operations extension
 		// acquire and release bits:
-		//   aq := and64(shr64(toU64(1), funct7), toU64(1))
-		//   rl := and64(funct7, toU64(1))
+		//   aq := and64(shr64(byteToU64(1), funct7), byteToU64(1))
+		//   rl := and64(funct7, byteToU64(1))
 		// if none set: unordered
 		// if aq is set: no following mem ops observed before acquire mem op
 		// if rl is set: release mem op not observed before earlier mem ops
@@ -882,8 +882,8 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 
 		// 0b010 == RV32A W variants
 		// 0b011 == RV64A D variants
-		size := shl64(funct3, toU64(1))
-		if lt64(size, toU64(4)) != 0 || gt64(size, toU64(8)) != 0 {
+		size := shl64(funct3, byteToU64(1))
+		if lt64(size, byteToU64(4)) != 0 || gt64(size, byteToU64(8)) != 0 {
 			revertWithCode(riscv.ErrBadAMOSize, fmt.Errorf("bad AMO size: %d", size))
 		}
 		addr := getRegister(rs1)
@@ -891,24 +891,24 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 			revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("addr %d not aligned with 4 bytes", addr))
 		}
 
-		op := shr64(toU64(2), funct7)
+		op := shr64(byteToU64(2), funct7)
 		switch op {
 		case 0x2: // 00010 = LR = Load Reserved
 			v := loadMem(addr, size, true, 1, 2)
 			setRegister(rd, v)
 			setLoadReservation(addr)
 		case 0x3: // 00011 = SC = Store Conditional
-			rdValue := toU64(1)
+			rdValue := byteToU64(1)
 			if eq64(addr, getLoadReservation()) != 0 {
 				rs2Value := getRegister(rs2)
 				storeMem(addr, size, rs2Value, 1, 2, true, true)
-				rdValue = toU64(0)
+				rdValue = byteToU64(0)
 			}
 			setRegister(rd, rdValue)
-			setLoadReservation(toU64(0))
+			setLoadReservation(byteToU64(0))
 		default: // AMO: Atomic Memory Operation
 			rs2Value := getRegister(rs2)
-			if eq64(size, toU64(4)) != 0 {
+			if eq64(size, byteToU64(4)) != 0 {
 				rs2Value = mask32Signed64(rs2Value)
 			}
 			value := rs2Value
@@ -947,18 +947,18 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 			storeMem(addr, size, v, 1, 3, false, true) // after overwriting 1, proof 2 is no longer valid
 			setRegister(rd, rdValue)
 		}
-		setPC(add64(pc, toU64(4)))
+		setPC(add64(pc, byteToU64(4)))
 	case 0x0F: // 000_1111: fence
 		// Used to impose additional ordering constraints; flushing the mem operation pipeline.
 		// This VM doesn't have a pipeline, nor additional harts, so this is a no-op.
 		// FENCE / FENCE.TSO / FENCE.I all no-op: there's nothing to synchronize.
-		setPC(add64(pc, toU64(4)))
+		setPC(add64(pc, byteToU64(4)))
 	case 0x07: // FLW/FLD: floating point load word/double
-		setPC(add64(pc, toU64(4))) // no-op this.
+		setPC(add64(pc, byteToU64(4))) // no-op this.
 	case 0x27: // FSW/FSD: floating point store word/double
-		setPC(add64(pc, toU64(4))) // no-op this.
+		setPC(add64(pc, byteToU64(4))) // no-op this.
 	case 0x53: // FADD etc. no-op is enough to pass Go runtime check
-		setPC(add64(pc, toU64(4))) // no-op this.
+		setPC(add64(pc, byteToU64(4))) // no-op this.
 	default:
 		revertWithCode(riscv.ErrUnknownOpCode, fmt.Errorf("unknown instruction opcode: %d", opcode))
 	}
diff --git a/rvgo/fast/yul64.go b/rvgo/fast/yul64.go
index c9b0099f..aeb4eda4 100644
--- a/rvgo/fast/yul64.go
+++ b/rvgo/fast/yul64.go
@@ -6,11 +6,11 @@ import "github.com/holiman/uint256"
 
 type U64 = uint64
 
-func toU256(v uint8) U256 {
+func byteToU256(v uint8) U256 {
 	return *uint256.NewInt(uint64(v))
 }
 
-func toU64(v uint8) U64 { return uint64(v) }
+func byteToU64(v uint8) U64 { return uint64(v) }
 
 func shortToU64(v uint16) U64 {
 	return uint64(v)
@@ -42,7 +42,7 @@ func u32Mask() uint64 {
 }
 
 func mask32Signed64(v U64) U64 {
-	return signExtend64(and64(v, u32Mask()), toU64(31))
+	return signExtend64(and64(v, u32Mask()), byteToU64(31))
 }
 
 func signExtend64(v uint64, bit uint64) uint64 {
@@ -61,7 +61,7 @@ func signExtend64To256(v U64) U256 {
 	case 0:
 		return *new(uint256.Int).SetUint64(v)
 	default:
-		return or(shl(toU256(64), not(U256{})), *new(uint256.Int).SetUint64(v))
+		return or(shl(byteToU256(64), not(U256{})), *new(uint256.Int).SetUint64(v))
 	}
 }
 
diff --git a/rvgo/slow/parse.go b/rvgo/slow/parse.go
index 7ca4f17f..069ba1c6 100644
--- a/rvgo/slow/parse.go
+++ b/rvgo/slow/parse.go
@@ -4,74 +4,74 @@ package slow
 // These should 1:1 match with the same definitions in the fast package.
 
 func parseImmTypeI(instr U64) U64 {
-	return signExtend64(shr64(toU64(20), instr), toU64(11))
+	return signExtend64(shr64(byteToU64(20), instr), byteToU64(11))
 }
 
 func parseImmTypeS(instr U64) U64 {
 	return signExtend64(
 		or64(
-			shl64(toU64(5), shr64(toU64(25), instr)),
-			and64(shr64(toU64(7), instr), toU64(0x1F)),
+			shl64(byteToU64(5), shr64(byteToU64(25), instr)),
+			and64(shr64(byteToU64(7), instr), byteToU64(0x1F)),
 		),
-		toU64(11))
+		byteToU64(11))
 }
 
 func parseImmTypeB(instr U64) U64 {
 	return signExtend64(
 		or64(
 			or64(
-				shl64(toU64(1), and64(shr64(toU64(8), instr), toU64(0xF))),
-				shl64(toU64(5), and64(shr64(toU64(25), instr), toU64(0x3F))),
+				shl64(byteToU64(1), and64(shr64(byteToU64(8), instr), byteToU64(0xF))),
+				shl64(byteToU64(5), and64(shr64(byteToU64(25), instr), byteToU64(0x3F))),
 			),
 			or64(
-				shl64(toU64(11), and64(shr64(toU64(7), instr), toU64(1))),
-				shl64(toU64(12), shr64(toU64(31), instr)),
+				shl64(byteToU64(11), and64(shr64(byteToU64(7), instr), byteToU64(1))),
+				shl64(byteToU64(12), shr64(byteToU64(31), instr)),
 			),
 		),
-		toU64(12),
+		byteToU64(12),
 	)
 }
 
 func parseImmTypeU(instr U64) U64 {
-	return signExtend64(shr64(toU64(12), instr), toU64(19))
+	return signExtend64(shr64(byteToU64(12), instr), byteToU64(19))
 }
 
 func parseImmTypeJ(instr U64) U64 {
 	return signExtend64(
 		or64(
 			or64(
-				and64(shr64(toU64(21), instr), shortToU64(0x3FF)),          // 10 bits for index 0:9
-				shl64(toU64(10), and64(shr64(toU64(20), instr), toU64(1))), // 1 bit for index 10
+				and64(shr64(byteToU64(21), instr), shortToU64(0x3FF)),                  // 10 bits for index 0:9
+				shl64(byteToU64(10), and64(shr64(byteToU64(20), instr), byteToU64(1))), // 1 bit for index 10
 			),
 			or64(
-				shl64(toU64(11), and64(shr64(toU64(12), instr), toU64(0xFF))), // 8 bits for index 11:18
-				shl64(toU64(19), shr64(toU64(31), instr)),                     // 1 bit for index 19
+				shl64(byteToU64(11), and64(shr64(byteToU64(12), instr), byteToU64(0xFF))), // 8 bits for index 11:18
+				shl64(byteToU64(19), shr64(byteToU64(31), instr)),                         // 1 bit for index 19
 			),
 		),
-		toU64(19),
+		byteToU64(19),
 	)
 }
 
 func parseOpcode(instr U64) U64 {
-	return and64(instr, toU64(0x7F))
+	return and64(instr, byteToU64(0x7F))
 }
 
 func parseRd(instr U64) U64 {
-	return and64(shr64(toU64(7), instr), toU64(0x1F))
+	return and64(shr64(byteToU64(7), instr), byteToU64(0x1F))
 }
 
 func parseFunct3(instr U64) U64 {
-	return and64(shr64(toU64(12), instr), toU64(0x7))
+	return and64(shr64(byteToU64(12), instr), byteToU64(0x7))
 }
 
 func parseRs1(instr U64) U64 {
-	return and64(shr64(toU64(15), instr), toU64(0x1F))
+	return and64(shr64(byteToU64(15), instr), byteToU64(0x1F))
 }
 
 func parseRs2(instr U64) U64 {
-	return and64(shr64(toU64(20), instr), toU64(0x1F))
+	return and64(shr64(byteToU64(20), instr), byteToU64(0x1F))
 }
 
 func parseFunct7(instr U64) U64 {
-	return shr64(toU64(25), instr)
+	return shr64(byteToU64(25), instr)
 }
diff --git a/rvgo/slow/vm.go b/rvgo/slow/vm.go
index c8294b77..e43d0540 100644
--- a/rvgo/slow/vm.go
+++ b/rvgo/slow/vm.go
@@ -129,7 +129,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 	}
 
 	stateContentOffset := uint8(4 + 32 + 32 + 32 + 32)
-	if iszero(eq(b32asBEWord(calldataload(toU64(4+32*3))), shortToU256(stateSize))) {
+	if iszero(eq(b32asBEWord(calldataload(byteToU64(4+32*3))), shortToU256(stateSize))) {
 		// user-provided state size must match expected state size
 		panic("invalid state size input")
 	}
@@ -225,11 +225,11 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 	}
 
 	getRegister := func(reg U64) U64 {
-		if gt64(reg, toU64(31)) != (U64{}) {
+		if gt64(reg, byteToU64(31)) != (U64{}) {
 			revertWithCode(riscv.ErrInvalidRegister, fmt.Errorf("cannot load invalid register: %d", reg.val()))
 		}
 		//fmt.Printf("load reg %2d: %016x\n", reg, state.Registers[reg])
-		offset := add64(toU64(stateOffsetRegisters), mul64(reg, toU64(8)))
+		offset := add64(byteToU64(stateOffsetRegisters), mul64(reg, byteToU64(8)))
 		return decodeU64BE(readState(offset.val(), 8))
 	}
 	setRegister := func(reg U64, v U64) {
@@ -238,10 +238,10 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 			// v is a HINT, but no hints are specified by standard spec, or used by us.
 			return
 		}
-		if gt64(reg, toU64(31)) != (U64{}) {
+		if gt64(reg, byteToU64(31)) != (U64{}) {
 			revertWithCode(riscv.ErrInvalidRegister, fmt.Errorf("unknown register %d, cannot write %x", reg.val(), v.val()))
 		}
-		offset := add64(toU64(stateOffsetRegisters), mul64(reg, toU64(8)))
+		offset := add64(byteToU64(stateOffsetRegisters), mul64(reg, byteToU64(8)))
 		writeState(offset.val(), 8, encodeU64BE(v))
 	}
 
@@ -280,7 +280,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 	//
 	proofOffset := func(proofIndex uint8) (offset U64) {
 		// proof size: 64-5+1=60 (a 64-bit mem-address branch to 32 byte leaf, incl leaf itself), all 32 bytes
-		offset = mul64(mul64(toU64(proofIndex), toU64(60)), toU64(32))
+		offset = mul64(mul64(byteToU64(proofIndex), byteToU64(60)), byteToU64(32))
 		offset = add64(offset, proofContentOffset)
 		return
 	}
@@ -290,19 +290,19 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 	}
 
 	getMemoryB32 := func(addr U64, proofIndex uint8) (out [32]byte) {
-		if and64(addr, toU64(31)) != (U64{}) { // quick addr alignment check
+		if and64(addr, byteToU64(31)) != (U64{}) { // quick addr alignment check
 			revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("addr %d not aligned with 32 bytes", addr))
 		}
 		offset := proofOffset(proofIndex)
 		leaf := calldataload(offset)
-		offset = add64(offset, toU64(32))
+		offset = add64(offset, byteToU64(32))
 
-		path := shr64(toU64(5), addr) // 32 bytes of memory per leaf
-		node := leaf                  // starting from the leaf node, work back up by combining with siblings, to reconstruct the root
+		path := shr64(byteToU64(5), addr) // 32 bytes of memory per leaf
+		node := leaf                      // starting from the leaf node, work back up by combining with siblings, to reconstruct the root
 		for i := uint8(0); i < 64-5; i++ {
 			sibling := calldataload(offset)
-			offset = add64(offset, toU64(32))
-			switch and64(shr64(toU64(i), path), toU64(1)).val() {
+			offset = add64(offset, byteToU64(32))
+			switch and64(shr64(byteToU64(i), path), byteToU64(1)).val() {
 			case 0:
 				node = hashPair(node, sibling)
 			case 1:
@@ -320,19 +320,19 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 	// warning: setMemoryB32 does not verify the proof,
 	// it assumes the same memory proof has been verified with getMemoryB32
 	setMemoryB32 := func(addr U64, v [32]byte, proofIndex uint8) {
-		if and64(addr, toU64(31)) != (U64{}) {
+		if and64(addr, byteToU64(31)) != (U64{}) {
 			revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("addr %d not aligned with 32 bytes", addr))
 		}
 		offset := proofOffset(proofIndex)
 		leaf := v
-		offset = add64(offset, toU64(32))
-		path := shr64(toU64(5), addr) // 32 bytes of memory per leaf
-		node := leaf                  // starting from the leaf node, work back up by combining with siblings, to reconstruct the root
+		offset = add64(offset, byteToU64(32))
+		path := shr64(byteToU64(5), addr) // 32 bytes of memory per leaf
+		node := leaf                      // starting from the leaf node, work back up by combining with siblings, to reconstruct the root
 		for i := uint8(0); i < 64-5; i++ {
 			sibling := calldataload(offset)
-			offset = add64(offset, toU64(32))
+			offset = add64(offset, byteToU64(32))
 
-			switch and64(shr64(toU64(i), path), toU64(1)).val() {
+			switch and64(shr64(byteToU64(i), path), byteToU64(1)).val() {
 			case 0:
 				node = hashPair(node, sibling)
 			case 1:
@@ -348,14 +348,14 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 			revertWithCode(riscv.ErrLoadExceeds8Bytes, fmt.Errorf("cannot load more than 8 bytes: %d", size))
 		}
 		// load/verify left part
-		leftAddr := and64(addr, not64(toU64(31)))
+		leftAddr := and64(addr, not64(byteToU64(31)))
 		left := b32asBEWord(getMemoryB32(leftAddr, proofIndexL))
 		alignment := sub64(addr, leftAddr)
 
 		right := U256{}
-		rightAddr := and64(add64(addr, sub64(size, toU64(1))), not64(toU64(31)))
-		leftShamt := sub64(sub64(toU64(32), alignment), size)
-		rightShamt := toU64(0)
+		rightAddr := and64(add64(addr, sub64(size, byteToU64(1))), not64(byteToU64(31)))
+		leftShamt := sub64(sub64(byteToU64(32), alignment), size)
+		rightShamt := byteToU64(0)
 		if iszero64(eq64(leftAddr, rightAddr)) {
 			// if unaligned, use second proof for the right part
 			if proofIndexR == 0xff {
@@ -364,35 +364,35 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 			// load/verify right part
 			right = b32asBEWord(getMemoryB32(rightAddr, proofIndexR))
 			// left content is aligned to right of 32 bytes
-			leftShamt = toU64(0)
-			rightShamt = sub64(sub64(toU64(64), alignment), size)
+			leftShamt = byteToU64(0)
+			rightShamt = sub64(sub64(byteToU64(64), alignment), size)
 		}
 
 		// left: prepare for byte-taking by right-aligning
-		left = shr(u64ToU256(shl64(toU64(3), leftShamt)), left)
+		left = shr(u64ToU256(shl64(byteToU64(3), leftShamt)), left)
 		// right: right-align for byte-taking by right-aligning
-		right = shr(u64ToU256(shl64(toU64(3), rightShamt)), right)
+		right = shr(u64ToU256(shl64(byteToU64(3), rightShamt)), right)
 		// loop:
 		for i := uint8(0); i < uint8(size.val()); i++ {
 			// translate to reverse byte lookup, since we are reading little-endian memory, and need the highest byte first.
 			// effAddr := (addr + size - 1 - i) &^ 31
-			effAddr := and64(sub64(sub64(add64(addr, size), toU64(1)), toU64(i)), not64(toU64(31)))
+			effAddr := and64(sub64(sub64(add64(addr, size), byteToU64(1)), byteToU64(i)), not64(byteToU64(31)))
 			// take a byte from either left or right, depending on the effective address
-			b := toU256(0)
+			b := byteToU256(0)
 			switch eq64(effAddr, leftAddr).val() {
 			case 1:
-				b = and(left, toU256(0xff))
-				left = shr(toU256(8), left)
+				b = and(left, byteToU256(0xff))
+				left = shr(byteToU256(8), left)
 			case 0:
-				b = and(right, toU256(0xff))
-				right = shr(toU256(8), right)
+				b = and(right, byteToU256(0xff))
+				right = shr(byteToU256(8), right)
 			}
 			// append it to the output
-			out = or64(shl64(toU64(8), out), u256ToU64(b))
+			out = or64(shl64(byteToU64(8), out), u256ToU64(b))
 		}
 
 		if signed {
-			signBitShift := sub64(shl64(toU64(3), size), toU64(1))
+			signBitShift := sub64(shl64(byteToU64(3), size), byteToU64(1))
 			out = signExtend64(out, signBitShift)
 		}
 		return
@@ -403,25 +403,25 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 		start := alignment
 		end := add64(alignment, size)
 		for i := uint8(0); i < 64; i++ {
-			index := toU64(i)
-			leftSide := lt64(index, toU64(32))
+			index := byteToU64(i)
+			leftSide := lt64(index, byteToU64(32))
 			switch leftSide.val() {
 			case 1:
-				leftPatch = shl(toU256(8), leftPatch)
-				leftMask = shl(toU256(8), leftMask)
+				leftPatch = shl(byteToU256(8), leftPatch)
+				leftMask = shl(byteToU256(8), leftMask)
 			case 0:
-				rightPatch = shl(toU256(8), rightPatch)
-				rightMask = shl(toU256(8), rightMask)
+				rightPatch = shl(byteToU256(8), rightPatch)
+				rightMask = shl(byteToU256(8), rightMask)
 			}
-			if and64(eq64(lt64(index, start), toU64(0)), lt64(index, end)) != (U64{}) { // if alignment <= i < alignment+size
-				b := and(shr(u64ToU256(shl64(toU64(3), sub64(index, alignment))), value), toU256(0xff))
+			if and64(eq64(lt64(index, start), byteToU64(0)), lt64(index, end)) != (U64{}) { // if alignment <= i < alignment+size
+				b := and(shr(u64ToU256(shl64(byteToU64(3), sub64(index, alignment))), value), byteToU256(0xff))
 				switch leftSide.val() {
 				case 1:
 					leftPatch = or(leftPatch, b)
-					leftMask = or(leftMask, toU256(0xff))
+					leftMask = or(leftMask, byteToU256(0xff))
 				case 0:
 					rightPatch = or(rightPatch, b)
-					rightMask = or(rightMask, toU256(0xff))
+					rightMask = or(rightMask, byteToU256(0xff))
 				}
 			}
 		}
@@ -433,8 +433,8 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 			revertWithCode(riscv.ErrStoreExceeds32Bytes, fmt.Errorf("cannot store more than 32 bytes: %d", size))
 		}
 
-		leftAddr := and64(addr, not64(toU64(31)))
-		rightAddr := and64(add64(addr, sub64(size, toU64(1))), not64(toU64(31)))
+		leftAddr := and64(addr, not64(byteToU64(31)))
+		rightAddr := and64(add64(addr, sub64(size, byteToU64(1))), not64(byteToU64(31)))
 		alignment := sub64(addr, leftAddr)
 		leftMask, rightMask, leftPatch, rightPatch := leftAndRight(alignment, size, value)
 
@@ -472,19 +472,19 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 	//
 	writePreimageKey := func(addr U64, count U64) (out U64) {
 		// adjust count down, so we only have to read a single 32 byte leaf of memory
-		alignment := and64(addr, toU64(31))
-		maxData := sub64(toU64(32), alignment)
+		alignment := and64(addr, byteToU64(31))
+		maxData := sub64(byteToU64(32), alignment)
 		if gt64(count, maxData) != (U64{}) {
 			count = maxData
 		}
 
 		dat := b32asBEWord(getMemoryB32(sub64(addr, alignment), 1))
 		// shift out leading bits
-		dat = shl(u64ToU256(shl64(toU64(3), alignment)), dat)
+		dat = shl(u64ToU256(shl64(byteToU64(3), alignment)), dat)
 		// shift to right end, remove trailing bits
-		dat = shr(u64ToU256(shl64(toU64(3), sub64(toU64(32), count))), dat)
+		dat = shr(u64ToU256(shl64(byteToU64(3), sub64(byteToU64(32), count))), dat)
 
-		bits := shl(toU256(3), u64ToU256(count))
+		bits := shl(byteToU256(3), u64ToU256(count))
 
 		preImageKey := getPreimageKey()
 
@@ -495,7 +495,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 
 		// We reset the pre-image value offset back to 0 (the right part of the merkle pair)
 		setPreimageKey(beWordAsB32(key))
-		setPreimageOffset(toU64(0))
+		setPreimageOffset(byteToU64(0))
 		out = count
 		return
 	}
@@ -504,7 +504,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 		d, l, err := po.ReadPreimagePart(key, offset.val())
 		if err == nil {
 			dat = d
-			datlen = toU64(l)
+			datlen = byteToU64(l)
 			return
 		}
 		revertWithCode(riscv.ErrFailToReadPreimage, err)
@@ -518,11 +518,11 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 		// make call to pre-image oracle contract
 		pdatB32, pdatlen := readPreimagePart(preImageKey, offset)
 		if iszero64(pdatlen) { // EOF
-			out = toU64(0)
+			out = byteToU64(0)
 			return
 		}
-		alignment := and64(addr, toU64(31))    // how many bytes addr is offset from being left-aligned
-		maxData := sub64(toU64(32), alignment) // higher alignment leaves less room for data this step
+		alignment := and64(addr, byteToU64(31))    // how many bytes addr is offset from being left-aligned
+		maxData := sub64(byteToU64(32), alignment) // higher alignment leaves less room for data this step
 		if gt64(count, maxData) != (U64{}) {
 			count = maxData
 		}
@@ -530,9 +530,9 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 			count = pdatlen
 		}
 
-		bits := shl64(toU64(3), sub64(toU64(32), count))             // 32-count, in bits
-		mask := not(sub(shl(u64ToU256(bits), toU256(1)), toU256(1))) // left-aligned mask for count bytes
-		alignmentBits := u64ToU256(shl64(toU64(3), alignment))
+		bits := shl64(byteToU64(3), sub64(byteToU64(32), count))             // 32-count, in bits
+		mask := not(sub(shl(u64ToU256(bits), byteToU256(1)), byteToU256(1))) // left-aligned mask for count bytes
+		alignmentBits := u64ToU256(shl64(byteToU64(3), alignment))
 		mask = shr(alignmentBits, mask)                  // mask of count bytes, shifted by alignment
 		pdat := shr(alignmentBits, b32asBEWord(pdatB32)) // pdat, shifted by alignment
 
@@ -552,42 +552,42 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 	// Syscall handling
 	//
 	sysCall := func() {
-		a7 := getRegister(toU64(17))
+		a7 := getRegister(byteToU64(17))
 		switch a7.val() {
 		case riscv.SysExit: // exit the calling thread. No multi-thread support yet, so just exit.
-			a0 := getRegister(toU64(10))
+			a0 := getRegister(byteToU64(10))
 			setExitCode(uint8(a0.val()))
 			setExited()
 			// program stops here, no need to change registers.
 		case riscv.SysExitGroup: // exit-group
-			a0 := getRegister(toU64(10))
+			a0 := getRegister(byteToU64(10))
 			setExitCode(uint8(a0.val()))
 			setExited()
 		case riscv.SysBrk: // brk
 			// Go sys_linux_riscv64 runtime will only ever call brk(NULL), i.e. first argument (register a0) set to 0.
 
 			// brk(0) changes nothing about the memory, and returns the current page break
-			v := shl64(toU64(30), toU64(1)) // set program break at 1 GiB
-			setRegister(toU64(10), v)
-			setRegister(toU64(11), toU64(0)) // no error
+			v := shl64(byteToU64(30), byteToU64(1)) // set program break at 1 GiB
+			setRegister(byteToU64(10), v)
+			setRegister(byteToU64(11), byteToU64(0)) // no error
 		case riscv.SysMmap: // mmap
 			// A0 = addr (hint)
-			addr := getRegister(toU64(10))
+			addr := getRegister(byteToU64(10))
 			// A1 = n (length)
-			length := getRegister(toU64(11))
+			length := getRegister(byteToU64(11))
 			// A2 = prot (memory protection type, can ignore)
 			// A3 = flags (shared with other process and or written back to file)
-			flags := getRegister(toU64(13))
+			flags := getRegister(byteToU64(13))
 			// A4 = fd (file descriptor, can ignore because we support anon memory only)
-			fd := getRegister(toU64(14))
+			fd := getRegister(byteToU64(14))
 			// A5 = offset (offset in file, we don't support any non-anon memory, so we can ignore this)
 
-			errCode := toU64(0)
+			errCode := byteToU64(0)
 
 			// ensure MAP_ANONYMOUS is set and fd == -1
 			if (flags.val()&0x20) == 0 || fd != u64Mask() {
 				addr = u64Mask()
-				errCode = toU64(0x4d) // no error
+				errCode = byteToU64(0x4d) // EBADF
 			} else {
 				// ignore: prot, flags, fd, offset
 				switch addr.val() {
@@ -607,131 +607,131 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 					//fmt.Printf("mmap: 0x%016x (0x%x allowed)\n", addr, length)
 				}
 			}
-			setRegister(toU64(10), addr)
-			setRegister(toU64(11), errCode)
+			setRegister(byteToU64(10), addr)
+			setRegister(byteToU64(11), errCode)
 		case riscv.SysRead: // read
-			fd := getRegister(toU64(10))    // A0 = fd
-			addr := getRegister(toU64(11))  // A1 = *buf addr
-			count := getRegister(toU64(12)) // A2 = count
+			fd := getRegister(byteToU64(10))    // A0 = fd
+			addr := getRegister(byteToU64(11))  // A1 = *buf addr
+			count := getRegister(byteToU64(12)) // A2 = count
 			var n U64
 			var errCode U64
 			switch fd.val() {
 			case riscv.FdStdin: // stdin
-				n = toU64(0) // never read anything from stdin
-				errCode = toU64(0)
+				n = byteToU64(0) // never read anything from stdin
+				errCode = byteToU64(0)
 			case riscv.FdHintRead: // hint-read
 				// say we read it all, to continue execution after reading the hint-write ack response
 				n = count
-				errCode = toU64(0)
+				errCode = byteToU64(0)
 			case riscv.FdPreimageRead: // preimage read
 				n = readPreimageValue(addr, count)
-				errCode = toU64(0)
+				errCode = byteToU64(0)
 			default:
-				n = u64Mask()         //  -1 (reading error)
-				errCode = toU64(0x4d) // EBADF
+				n = u64Mask()             //  -1 (reading error)
+				errCode = byteToU64(0x4d) // EBADF
 			}
-			setRegister(toU64(10), n)
-			setRegister(toU64(11), errCode)
+			setRegister(byteToU64(10), n)
+			setRegister(byteToU64(11), errCode)
 		case riscv.SysWrite: // write
-			fd := getRegister(toU64(10))    // A0 = fd
-			addr := getRegister(toU64(11))  // A1 = *buf addr
-			count := getRegister(toU64(12)) // A2 = count
+			fd := getRegister(byteToU64(10))    // A0 = fd
+			addr := getRegister(byteToU64(11))  // A1 = *buf addr
+			count := getRegister(byteToU64(12)) // A2 = count
 			var n U64
 			var errCode U64
 			switch fd.val() {
 			case riscv.FdStdout: // stdout
 				n = count // write completes fully in single instruction step
-				errCode = toU64(0)
+				errCode = byteToU64(0)
 			case riscv.FdStderr: // stderr
 				n = count // write completes fully in single instruction step
-				errCode = toU64(0)
+				errCode = byteToU64(0)
 			case riscv.FdHintWrite: // hint-write
 				n = count
-				errCode = toU64(0)
+				errCode = byteToU64(0)
 			case riscv.FdPreimageWrite: // pre-image key write
 				n = writePreimageKey(addr, count)
-				errCode = toU64(0) // no error
+				errCode = byteToU64(0) // no error
 			default: // any other file, including (3) hint read (5) preimage read
-				n = u64Mask()         //  -1 (writing error)
-				errCode = toU64(0x4d) // EBADF
+				n = u64Mask()             //  -1 (writing error)
+				errCode = byteToU64(0x4d) // EBADF
 			}
-			setRegister(toU64(10), n)
-			setRegister(toU64(11), errCode)
+			setRegister(byteToU64(10), n)
+			setRegister(byteToU64(11), errCode)
 		case riscv.SysFcntl: // fcntl - file descriptor manipulation / info lookup
-			fd := getRegister(toU64(10))  // A0 = fd
-			cmd := getRegister(toU64(11)) // A1 = cmd
+			fd := getRegister(byteToU64(10))  // A0 = fd
+			cmd := getRegister(byteToU64(11)) // A1 = cmd
 			var out U64
 			var errCode U64
 			switch cmd.val() {
 			case 0x1: // F_GETFD: get file descriptor flags
 				switch fd.val() {
 				case 0: // stdin
-					out = toU64(0) // no flag set
+					out = byteToU64(0) // no flag set
 				case 1: // stdout
-					out = toU64(0) // no flag set
+					out = byteToU64(0) // no flag set
 				case 2: // stderr
-					out = toU64(0) // no flag set
+					out = byteToU64(0) // no flag set
 				case 3: // hint-read
-					out = toU64(0) // no flag set
+					out = byteToU64(0) // no flag set
 				case 4: // hint-write
-					out = toU64(0) // no flag set
+					out = byteToU64(0) // no flag set
 				case 5: // pre-image read
-					out = toU64(0) // no flag set
+					out = byteToU64(0) // no flag set
 				case 6: // pre-image write
-					out = toU64(0) // no flag set
+					out = byteToU64(0) // no flag set
 				default:
 					out = u64Mask()
-					errCode = toU64(0x4d) //EBADF
+					errCode = byteToU64(0x4d) //EBADF
 				}
 			case 0x3: // F_GETFL: get file descriptor flags
 				switch fd.val() {
 				case 0: // stdin
-					out = toU64(0) // O_RDONLY
+					out = byteToU64(0) // O_RDONLY
 				case 1: // stdout
-					out = toU64(1) // O_WRONLY
+					out = byteToU64(1) // O_WRONLY
 				case 2: // stderr
-					out = toU64(1) // O_WRONLY
+					out = byteToU64(1) // O_WRONLY
 				case 3: // hint-read
-					out = toU64(0) // O_RDONLY
+					out = byteToU64(0) // O_RDONLY
 				case 4: // hint-write
-					out = toU64(1) // O_WRONLY
+					out = byteToU64(1) // O_WRONLY
 				case 5: // pre-image read
-					out = toU64(0) // O_RDONLY
+					out = byteToU64(0) // O_RDONLY
 				case 6: // pre-image write
-					out = toU64(1) // O_WRONLY
+					out = byteToU64(1) // O_WRONLY
 				default:
 					out = u64Mask()
-					errCode = toU64(0x4d) // EBADF
+					errCode = byteToU64(0x4d) // EBADF
 				}
 			default: // no other commands: don't allow changing flags, duplicating FDs, etc.
 				out = u64Mask()
-				errCode = toU64(0x16) // EINVAL (cmd not recognized by this kernel)
+				errCode = byteToU64(0x16) // EINVAL (cmd not recognized by this kernel)
 			}
-			setRegister(toU64(10), out)
-			setRegister(toU64(11), errCode) // EBADF
+			setRegister(byteToU64(10), out)
+			setRegister(byteToU64(11), errCode) // errno: 0 on success, EBADF or EINVAL otherwise
 		case riscv.SysOpenat: // openat - the Go linux runtime will try to open optional /sys/kernel files for performance hints
-			setRegister(toU64(10), u64Mask())
-			setRegister(toU64(11), toU64(0xd)) // EACCES - no access allowed
+			setRegister(byteToU64(10), u64Mask())
+			setRegister(byteToU64(11), byteToU64(0xd)) // EACCES - no access allowed
 		case riscv.SysClockGettime: // clock_gettime
-			addr := getRegister(toU64(11)) // addr of timespec struct
+			addr := getRegister(byteToU64(11)) // addr of timespec struct
 			// write 1337s + 42ns as time
-			value := or(shortToU256(1337), shl(shortToU256(64), toU256(42)))
-			storeMemUnaligned(addr, toU64(16), value, 1, 2)
-			setRegister(toU64(10), toU64(0))
-			setRegister(toU64(11), toU64(0))
+			value := or(shortToU256(1337), shl(shortToU256(64), byteToU256(42)))
+			storeMemUnaligned(addr, byteToU64(16), value, 1, 2)
+			setRegister(byteToU64(10), byteToU64(0))
+			setRegister(byteToU64(11), byteToU64(0))
 		case riscv.SysClone: // clone - not supported
-			setRegister(toU64(10), toU64(1))
-			setRegister(toU64(11), toU64(0))
+			setRegister(byteToU64(10), byteToU64(1))
+			setRegister(byteToU64(11), byteToU64(0))
 		case riscv.SysGetrlimit: // getrlimit
-			res := getRegister(toU64(10))
-			addr := getRegister(toU64(11))
+			res := getRegister(byteToU64(10))
+			addr := getRegister(byteToU64(11))
 			switch res.val() {
 			case 0x7: // RLIMIT_NOFILE
 				// first 8 bytes: soft limit. 1024 file handles max open
 				// second 8 bytes: hard limit
-				storeMemUnaligned(addr, toU64(16), or(shortToU256(1024), shl(toU256(64), shortToU256(1024))), 1, 2)
-				setRegister(toU64(10), toU64(0))
-				setRegister(toU64(11), toU64(0))
+				storeMemUnaligned(addr, byteToU64(16), or(shortToU256(1024), shl(byteToU256(64), shortToU256(1024))), 1, 2)
+				setRegister(byteToU64(10), byteToU64(0))
+				setRegister(byteToU64(11), byteToU64(0))
 			default:
 				revertWithCode(riscv.ErrUnrecognizedResource, &UnrecognizedResourceErr{Resource: res})
 			}
@@ -743,8 +743,8 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 			revertWithCode(riscv.ErrInvalidSyscall, &UnsupportedSyscallErr{SyscallNum: a7})
 		default:
 			// Ignore(no-op) unsupported system calls
-			setRegister(toU64(10), toU64(0))
-			setRegister(toU64(11), toU64(0))
+			setRegister(byteToU64(10), byteToU64(0))
+			setRegister(byteToU64(11), byteToU64(0))
 		}
 	}
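// Illustrative sketch (not part of this patch): every syscall case above reports its
// outcome through the same register pair — x10 (a0) carries the result, or u64Mask()
// (all ones, i.e. -1) on failure, and x11 (a1) carries the errno. A minimal plain-Go
// sketch of that convention; the helper name, plain uint64 types, and constant names
// are hypothetical:
package sketch

const (
	errEBADF  = 0x4d // bad file descriptor
	errEINVAL = 0x16 // command not recognized
	errEACCES = 0xd  // access not allowed (openat)
)

// setSyscallReturn mirrors the inline setRegister(byteToU64(10), ...) /
// setRegister(byteToU64(11), ...) pattern used by each case.
func setSyscallReturn(setRegister func(reg, val uint64), result, errno uint64) {
	setRegister(10, result) // a0: result, or ^uint64(0) on error
	setRegister(11, errno)  // a1: errno, 0 on success
}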
 
@@ -755,10 +755,10 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 	if getExited() { // early exit if we can
 		return computeStateHash(), nil
 	}
-	setStep(add64(getStep(), toU64(1)))
+	setStep(add64(getStep(), byteToU64(1)))
 
 	pc := getPC()
-	instr := loadMem(pc, toU64(4), false, 0, 0xff) // raw instruction
+	instr := loadMem(pc, byteToU64(4), false, 0, 0xff) // raw instruction
 
 	// these fields are ignored if not applicable to the instruction type / opcode
 	opcode := parseOpcode(instr)
@@ -773,48 +773,48 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 		// LB, LH, LW, LD, LBU, LHU, LWU
 
 		// bits[14:12] set to 111 are reserved
-		if eq64(funct3, toU64(0x7)) != (U64{}) {
+		if eq64(funct3, byteToU64(0x7)) != (U64{}) {
 			revertWithCode(riscv.ErrInvalidSyscall, fmt.Errorf("illegal instruction %d: reserved instruction encoding", instr))
 		}
 
 		imm := parseImmTypeI(instr)
-		signed := iszero64(and64(funct3, toU64(4)))      // 4 = 100 -> bitflag
-		size := shl64(and64(funct3, toU64(3)), toU64(1)) // 3 = 11 -> 1, 2, 4, 8 bytes size
+		signed := iszero64(and64(funct3, byteToU64(4)))          // 4 = 100 -> bitflag
+		size := shl64(and64(funct3, byteToU64(3)), byteToU64(1)) // 3 = 11 -> 1, 2, 4, 8 bytes size
 		rs1Value := getRegister(rs1)
-		memIndex := add64(rs1Value, signExtend64(imm, toU64(11)))
+		memIndex := add64(rs1Value, signExtend64(imm, byteToU64(11)))
 		rdValue := loadMem(memIndex, size, signed, 1, 2)
 		setRegister(rd, rdValue)
-		setPC(add64(pc, toU64(4)))
+		setPC(add64(pc, byteToU64(4)))
 	case 0x23: // 010_0011: memory storing
 		// SB, SH, SW, SD
 		imm := parseImmTypeS(instr)
-		size := shl64(funct3, toU64(1))
+		size := shl64(funct3, byteToU64(1))
 		value := getRegister(rs2)
 		rs1Value := getRegister(rs1)
-		memIndex := add64(rs1Value, signExtend64(imm, toU64(11)))
+		memIndex := add64(rs1Value, signExtend64(imm, byteToU64(11)))
 		storeMem(memIndex, size, value, 1, 2)
-		setPC(add64(pc, toU64(4)))
+		setPC(add64(pc, byteToU64(4)))
 	case 0x63: // 110_0011: branching
 		rs1Value := getRegister(rs1)
 		rs2Value := getRegister(rs2)
-		branchHit := toU64(0)
+		branchHit := byteToU64(0)
 		switch funct3.val() {
 		case 0: // 000 = BEQ
 			branchHit = eq64(rs1Value, rs2Value)
 		case 1: // 001 = BNE
-			branchHit = and64(not64(eq64(rs1Value, rs2Value)), toU64(1))
+			branchHit = and64(not64(eq64(rs1Value, rs2Value)), byteToU64(1))
 		case 4: // 100 = BLT
 			branchHit = slt64(rs1Value, rs2Value)
 		case 5: // 101 = BGE
-			branchHit = and64(not64(slt64(rs1Value, rs2Value)), toU64(1))
+			branchHit = and64(not64(slt64(rs1Value, rs2Value)), byteToU64(1))
 		case 6: // 110 = BLTU
 			branchHit = lt64(rs1Value, rs2Value)
 		case 7: // 111 = BGEU
-			branchHit = and64(not64(lt64(rs1Value, rs2Value)), toU64(1))
+			branchHit = and64(not64(lt64(rs1Value, rs2Value)), byteToU64(1))
 		}
 		switch branchHit.val() {
 		case 0:
-			pc = add64(pc, toU64(4))
+			pc = add64(pc, byteToU64(4))
 		default:
 			imm := parseImmTypeB(instr)
 			// imm is a signed offset, in multiples of 2 bytes.
@@ -831,7 +831,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 		case 0: // 000 = ADDI
 			rdValue = add64(rs1Value, imm)
 		case 1: // 001 = SLLI
-			rdValue = shl64(and64(imm, toU64(0x3F)), rs1Value) // lower 6 bits in 64 bit mode
+			rdValue = shl64(and64(imm, byteToU64(0x3F)), rs1Value) // lower 6 bits in 64 bit mode
 		case 2: // 010 = SLTI
 			rdValue = slt64(rs1Value, imm)
 		case 3: // 011 = SLTIU
@@ -839,11 +839,11 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 		case 4: // 100 = XORI
 			rdValue = xor64(rs1Value, imm)
 		case 5: // 101 = SR~
-			switch shr64(toU64(6), imm).val() { // in rv64i the top 6 bits select the shift type
+			switch shr64(byteToU64(6), imm).val() { // in rv64i the top 6 bits select the shift type
 			case 0x00: // 000000 = SRLI
-				rdValue = shr64(and64(imm, toU64(0x3F)), rs1Value) // lower 6 bits in 64 bit mode
+				rdValue = shr64(and64(imm, byteToU64(0x3F)), rs1Value) // lower 6 bits in 64 bit mode
 			case 0x10: // 010000 = SRAI
-				rdValue = sar64(and64(imm, toU64(0x3F)), rs1Value) // lower 6 bits in 64 bit mode
+				rdValue = sar64(and64(imm, byteToU64(0x3F)), rs1Value) // lower 6 bits in 64 bit mode
 			}
 		case 6: // 110 = ORI
 			rdValue = or64(rs1Value, imm)
@@ -851,7 +851,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 			rdValue = and64(rs1Value, imm)
 		}
 		setRegister(rd, rdValue)
-		setPC(add64(pc, toU64(4)))
+		setPC(add64(pc, byteToU64(4)))
 	case 0x1B: // 001_1011: immediate arithmetic and logic signed 32 bit
 		rs1Value := getRegister(rs1)
 		imm := parseImmTypeI(instr)
@@ -860,18 +860,18 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 		case 0: // 000 = ADDIW
 			rdValue = mask32Signed64(add64(rs1Value, imm))
 		case 1: // 001 = SLLIW
-			rdValue = mask32Signed64(shl64(and64(imm, toU64(0x1F)), rs1Value))
+			rdValue = mask32Signed64(shl64(and64(imm, byteToU64(0x1F)), rs1Value))
 		case 5: // 101 = SR~
-			shamt := and64(imm, toU64(0x1F))
-			switch shr64(toU64(5), imm).val() { // top 7 bits select the shift type
+			shamt := and64(imm, byteToU64(0x1F))
+			switch shr64(byteToU64(5), imm).val() { // top 7 bits select the shift type
 			case 0x00: // 0000000 = SRLIW
-				rdValue = signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), toU64(31))
+				rdValue = signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), byteToU64(31))
 			case 0x20: // 0100000 = SRAIW
-				rdValue = signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), sub64(toU64(31), shamt))
+				rdValue = signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), sub64(byteToU64(31), shamt))
 			}
 		}
 		setRegister(rd, rdValue)
-		setPC(add64(pc, toU64(4)))
+		setPC(add64(pc, byteToU64(4)))
 	case 0x33: // 011_0011: register arithmetic and logic
 		rs1Value := getRegister(rs1)
 		rs2Value := getRegister(rs2)
@@ -882,11 +882,11 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 			case 0: // 000 = MUL: signed x signed
 				rdValue = mul64(rs1Value, rs2Value)
 			case 1: // 001 = MULH: upper bits of signed x signed
-				rdValue = u256ToU64(shr(toU256(64), mul(signExtend64To256(rs1Value), signExtend64To256(rs2Value))))
+				rdValue = u256ToU64(shr(byteToU256(64), mul(signExtend64To256(rs1Value), signExtend64To256(rs2Value))))
 			case 2: // 010 = MULHSU: upper bits of signed x unsigned
-				rdValue = u256ToU64(shr(toU256(64), mul(signExtend64To256(rs1Value), u64ToU256(rs2Value))))
+				rdValue = u256ToU64(shr(byteToU256(64), mul(signExtend64To256(rs1Value), u64ToU256(rs2Value))))
 			case 3: // 011 = MULHU: upper bits of unsigned x unsigned
-				rdValue = u256ToU64(shr(toU256(64), mul(u64ToU256(rs1Value), u64ToU256(rs2Value))))
+				rdValue = u256ToU64(shr(byteToU256(64), mul(u64ToU256(rs1Value), u64ToU256(rs2Value))))
 			case 4: // 100 = DIV
 				switch rs2Value.val() {
 				case 0:
@@ -926,7 +926,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 					rdValue = sub64(rs1Value, rs2Value)
 				}
 			case 1: // 001 = SLL
-				rdValue = shl64(and64(rs2Value, toU64(0x3F)), rs1Value) // only the low 6 bits are consider in RV6VI
+				rdValue = shl64(and64(rs2Value, byteToU64(0x3F)), rs1Value) // only the low 6 bits are considered in RV64I
 			case 2: // 010 = SLT
 				rdValue = slt64(rs1Value, rs2Value)
 			case 3: // 011 = SLTU
@@ -936,9 +936,9 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 			case 5: // 101 = SR~
 				switch funct7.val() {
 				case 0x00: // 0000000 = SRL
-					rdValue = shr64(and64(rs2Value, toU64(0x3F)), rs1Value) // logical: fill with zeroes
+					rdValue = shr64(and64(rs2Value, byteToU64(0x3F)), rs1Value) // logical: fill with zeroes
 				case 0x20: // 0100000 = SRA
-					rdValue = sar64(and64(rs2Value, toU64(0x3F)), rs1Value) // arithmetic: sign bit is extended
+					rdValue = sar64(and64(rs2Value, byteToU64(0x3F)), rs1Value) // arithmetic: sign bit is extended
 				}
 			case 6: // 110 = OR
 				rdValue = or64(rs1Value, rs2Value)
@@ -947,7 +947,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 			}
 		}
 		setRegister(rd, rdValue)
-		setPC(add64(pc, toU64(4)))
+		setPC(add64(pc, byteToU64(4)))
 	case 0x3B: // 011_1011: register arithmetic and logic in 32 bits
 		rs1Value := getRegister(rs1)
 		rs2Value := getRegister(rs2)
@@ -996,68 +996,68 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 					rdValue = mask32Signed64(sub64(and64(rs1Value, u32Mask()), and64(rs2Value, u32Mask())))
 				}
 			case 1: // 001 = SLLW
-				rdValue = mask32Signed64(shl64(and64(rs2Value, toU64(0x1F)), rs1Value))
+				rdValue = mask32Signed64(shl64(and64(rs2Value, byteToU64(0x1F)), rs1Value))
 			case 5: // 101 = SR~
-				shamt := and64(rs2Value, toU64(0x1F))
+				shamt := and64(rs2Value, byteToU64(0x1F))
 				switch funct7.val() {
 				case 0x00: // 0000000 = SRLW
-					rdValue = signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), toU64(31))
+					rdValue = signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), byteToU64(31))
 				case 0x20: // 0100000 = SRAW
-					rdValue = signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), sub64(toU64(31), shamt))
+					rdValue = signExtend64(shr64(shamt, and64(rs1Value, u32Mask())), sub64(byteToU64(31), shamt))
 				}
 			}
 		}
 		setRegister(rd, rdValue)
-		setPC(add64(pc, toU64(4)))
+		setPC(add64(pc, byteToU64(4)))
 	case 0x37: // 011_0111: LUI = Load upper immediate
 		imm := parseImmTypeU(instr)
-		rdValue := shl64(toU64(12), imm)
+		rdValue := shl64(byteToU64(12), imm)
 		setRegister(rd, rdValue)
-		setPC(add64(pc, toU64(4)))
+		setPC(add64(pc, byteToU64(4)))
 	case 0x17: // 001_0111: AUIPC = Add upper immediate to PC
 		imm := parseImmTypeU(instr)
-		rdValue := add64(pc, signExtend64(shl64(toU64(12), imm), toU64(31)))
+		rdValue := add64(pc, signExtend64(shl64(byteToU64(12), imm), byteToU64(31)))
 		setRegister(rd, rdValue)
-		setPC(add64(pc, toU64(4)))
+		setPC(add64(pc, byteToU64(4)))
 	case 0x6F: // 110_1111: JAL = Jump and link
 		imm := parseImmTypeJ(instr)
-		rdValue := add64(pc, toU64(4))
+		rdValue := add64(pc, byteToU64(4))
 		setRegister(rd, rdValue)
 
-		newPC := add64(pc, signExtend64(shl64(toU64(1), imm), toU64(20)))
-		if and64(newPC, toU64(3)) != (U64{}) { // quick target alignment check
+		newPC := add64(pc, signExtend64(shl64(byteToU64(1), imm), byteToU64(20)))
+		if and64(newPC, byteToU64(3)) != (U64{}) { // quick target alignment check
 			revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("pc %d not aligned with 4 bytes", newPC))
 		}
 		setPC(newPC) // signed offset in multiples of 2 bytes (last bit is there, but ignored)
 	case 0x67: // 110_0111: JALR = Jump and link register
 		rs1Value := getRegister(rs1)
 		imm := parseImmTypeI(instr)
-		rdValue := add64(pc, toU64(4))
+		rdValue := add64(pc, byteToU64(4))
 		setRegister(rd, rdValue)
 
-		newPC := and64(add64(rs1Value, signExtend64(imm, toU64(11))), xor64(u64Mask(), toU64(1)))
-		if and64(newPC, toU64(3)) != (U64{}) { // quick target alignment check
+		newPC := and64(add64(rs1Value, signExtend64(imm, byteToU64(11))), xor64(u64Mask(), byteToU64(1)))
+		if and64(newPC, byteToU64(3)) != (U64{}) { // quick target alignment check
 			revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("pc %d not aligned with 4 bytes", newPC))
 		}
 		setPC(newPC) // least significant bit is set to 0
 	case 0x73: // 111_0011: environment things
 		switch funct3.val() {
 		case 0: // 000 = ECALL/EBREAK
-			switch shr64(toU64(20), instr).val() { // I-type, top 12 bits
+			switch shr64(byteToU64(20), instr).val() { // I-type, top 12 bits
 			case 0: // imm12 = 000000000000 ECALL
 				sysCall()
-				setPC(add64(pc, toU64(4)))
+				setPC(add64(pc, byteToU64(4)))
 			default: // imm12 = 000000000001 EBREAK
-				setPC(add64(pc, toU64(4))) // ignore breakpoint
+				setPC(add64(pc, byteToU64(4))) // ignore breakpoint
 			}
 		default: // ignore CSR instructions
-			setRegister(rd, toU64(0)) // ignore CSR instructions
-			setPC(add64(pc, toU64(4)))
+			setRegister(rd, byteToU64(0)) // ignore CSR instructions
+			setPC(add64(pc, byteToU64(4)))
 		}
 	case 0x2F: // 010_1111: RV32A and RV32A atomic operations extension
 		// acquire and release bits:
-		//   aq := and64(shr64(toU64(1), funct7), toU64(1))
-		//   rl := and64(funct7, toU64(1))
+		//   aq := and64(shr64(byteToU64(1), funct7), byteToU64(1))
+		//   rl := and64(funct7, byteToU64(1))
 		// if none set: unordered
 		// if aq is set: no following mem ops observed before acquire mem op
 		// if rl is set: release mem op not observed before earlier mem ops
@@ -1066,33 +1066,33 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 
 		// 0b010 == RV32A W variants
 		// 0b011 == RV64A D variants
-		size := shl64(funct3, toU64(1))
-		if or64(lt64(size, toU64(4)), gt64(size, toU64(8))) != (U64{}) {
+		size := shl64(funct3, byteToU64(1))
+		if or64(lt64(size, byteToU64(4)), gt64(size, byteToU64(8))) != (U64{}) {
 			revertWithCode(riscv.ErrBadAMOSize, fmt.Errorf("bad AMO size: %d", size))
 		}
 		addr := getRegister(rs1)
-		if and64(addr, toU64(3)) != (U64{}) { // quick addr alignment check
+		if and64(addr, byteToU64(3)) != (U64{}) { // quick addr alignment check
 			revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("addr %d not aligned with 4 bytes", addr))
 		}
 
-		op := shr64(toU64(2), funct7)
+		op := shr64(byteToU64(2), funct7)
 		switch op.val() {
 		case 0x2: // 00010 = LR = Load Reserved
 			v := loadMem(addr, size, true, 1, 2)
 			setRegister(rd, v)
 			setLoadReservation(addr)
 		case 0x3: // 00011 = SC = Store Conditional
-			rdValue := toU64(1)
+			rdValue := byteToU64(1)
 			if eq64(addr, getLoadReservation()) != (U64{}) {
 				rs2Value := getRegister(rs2)
 				storeMem(addr, size, rs2Value, 1, 2)
-				rdValue = toU64(0)
+				rdValue = byteToU64(0)
 			}
 			setRegister(rd, rdValue)
-			setLoadReservation(toU64(0))
+			setLoadReservation(byteToU64(0))
 		default: // AMO: Atomic Memory Operation
 			rs2Value := getRegister(rs2)
-			if eq64(size, toU64(4)) != (U64{}) {
+			if eq64(size, byteToU64(4)) != (U64{}) {
 				rs2Value = mask32Signed64(rs2Value)
 			}
 			value := rs2Value
@@ -1131,18 +1131,18 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 			storeMem(addr, size, v, 1, 3) // after overwriting 1, proof 2 is no longer valid
 			setRegister(rd, rdValue)
 		}
-		setPC(add64(pc, toU64(4)))
+		setPC(add64(pc, byteToU64(4)))
 	case 0x0F: // 000_1111: fence
 		// Used to impose additional ordering constraints; flushing the mem operation pipeline.
 		// This VM doesn't have a pipeline, nor additional harts, so this is a no-op.
 		// FENCE / FENCE.TSO / FENCE.I all no-op: there's nothing to synchronize.
-		setPC(add64(pc, toU64(4)))
+		setPC(add64(pc, byteToU64(4)))
 	case 0x07: // FLW/FLD: floating point load word/double
-		setPC(add64(pc, toU64(4))) // no-op this.
+		setPC(add64(pc, byteToU64(4))) // no-op this.
 	case 0x27: // FSW/FSD: floating point store word/double
-		setPC(add64(pc, toU64(4))) // no-op this.
+		setPC(add64(pc, byteToU64(4))) // no-op this.
 	case 0x53: // FADD etc. no-op is enough to pass Go runtime check
-		setPC(add64(pc, toU64(4))) // no-op this.
+		setPC(add64(pc, byteToU64(4))) // no-op this.
 	default:
 		revertWithCode(riscv.ErrUnknownOpCode, fmt.Errorf("unknown instruction opcode: %d", opcode))
 	}
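// Illustrative sketch (not part of this patch): the MULH case above (signed x signed)
// takes the upper 64 bits of the 128-bit product by widening to 256 bits —
// u256ToU64(shr(byteToU256(64), mul(signExtend64To256(rs1Value), signExtend64To256(rs2Value)))).
// A plain-Go reference producing the same result, for intuition only; the function
// name is hypothetical:
package sketch

import "math/bits"

func mulhRef(a, b int64) uint64 {
	// upper half of the unsigned 128-bit product
	hi, _ := bits.Mul64(uint64(a), uint64(b))
	// standard signed correction: mulh(a,b) = mulhu(a,b) - (a<0 ? b : 0) - (b<0 ? a : 0),
	// with all arithmetic mod 2^64
	if a < 0 {
		hi -= uint64(b)
	}
	if b < 0 {
		hi -= uint64(a)
	}
	return hi
}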
diff --git a/rvgo/slow/yul64.go b/rvgo/slow/yul64.go
index 08dc87af..13487088 100644
--- a/rvgo/slow/yul64.go
+++ b/rvgo/slow/yul64.go
@@ -11,12 +11,12 @@ func (v U64) val() uint64 {
 	return (*uint256.Int)(&v).Uint64()
 }
 
-func toU256(v uint8) U256 {
+func byteToU256(v uint8) U256 {
 	return *uint256.NewInt(uint64(v))
 }
 
-func toU64(v uint8) U64 {
-	return U64(toU256(v))
+func byteToU64(v uint8) U64 {
+	return U64(byteToU256(v))
 }
 
 func shortToU64(v uint16) U64 {
@@ -41,30 +41,30 @@ func u64ToU256(v U64) U256 {
 }
 
 func u64Mask() U64 { // max uint64
-	return U64(shr(toU256(192), not(U256{}))) // 256-64 = 192
+	return U64(shr(byteToU256(192), not(U256{}))) // 256-64 = 192
 }
 
 func u32Mask() U64 {
-	return U64(shr(toU256(224), not(U256{}))) // 256-32 = 224
+	return U64(shr(byteToU256(224), not(U256{}))) // 256-32 = 224
 }
 
 func mask32Signed64(v U64) U64 {
-	return signExtend64(and64(v, u32Mask()), toU64(31))
+	return signExtend64(and64(v, u32Mask()), byteToU64(31))
 }
 
 func u64Mod() U256 { // 1 << 64
-	return shl(toU256(64), toU256(1))
+	return shl(byteToU256(64), byteToU256(1))
 }
 
 func u64TopBit() U256 { // 1 << 63
-	return shl(toU256(63), toU256(1))
+	return shl(byteToU256(63), byteToU256(1))
 }
 
 func signExtend64(v U64, bit U64) U64 {
-	switch and(U256(v), shl(U256(bit), toU256(1))) {
+	switch and(U256(v), shl(U256(bit), byteToU256(1))) {
 	case U256{}:
 		// fill with zeroes, by masking
-		return U64(and(U256(v), shr(sub(toU256(63), U256(bit)), U256(u64Mask()))))
+		return U64(and(U256(v), shr(sub(byteToU256(63), U256(bit)), U256(u64Mask()))))
 	default:
 		// fill with ones, by or-ing
 		return U64(or(U256(v), shl(U256(bit), shr(U256(bit), U256(u64Mask())))))
@@ -76,7 +76,7 @@ func signExtend64To256(v U64) U256 {
 	case U256{}:
 		return U256(v)
 	default:
-		return or(shl(toU256(64), not(U256{})), U256(v))
+		return or(shl(byteToU256(64), not(U256{})), U256(v))
 	}
 }
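// Illustrative sketch (not part of this patch): a plain-uint64 equivalent of the
// signExtend64 above, handy for sanity-checking that the rename keeps behavior
// unchanged. The function name is hypothetical; it assumes bit < 64, which holds
// for the callers above (they pass constants such as byteToU64(11) or byteToU64(31)).
package sketch

func signExtend64Ref(v uint64, bit uint) uint64 {
	if v&(1<<bit) == 0 {
		// sign bit clear: zero everything above `bit` by masking
		return v & (^uint64(0) >> (63 - bit))
	}
	// sign bit set: fill everything from `bit` upward with ones
	return v | (^uint64(0) << bit)
}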