
Commit

avoid a few more copies (#2215)
arnetheduck authored May 24, 2024
1 parent 771d6fd commit 0a49833
Showing 9 changed files with 21 additions and 20 deletions.
7 changes: 3 additions & 4 deletions nimbus/db/aristo/aristo_blobify.nim
@@ -133,7 +133,7 @@ proc blobify*(vtx: VertexRef): Result[Blob, AristoError] =
   ## Variant of `blobify()`
   var data: Blob
   ? vtx.blobify data
-  ok(data)
+  ok(move(data))
 
 proc blobify*(vGen: openArray[VertexID]; data: var Blob) =
   ## This function serialises a list of vertex IDs.
@@ -251,8 +251,7 @@ proc blobify*(filter: FilterRef): Result[Blob, AristoError] =
   ## ...
   var data: Blob
   ? filter.blobify data
-  ok data
-
+  ok move(data)
 
 proc blobify*(vFqs: openArray[(QueueID,QueueID)]; data: var Blob) =
   ## This function serialises a list of filter queue IDs.
@@ -425,7 +424,7 @@ proc deblobify*(data: openArray[byte]; T: type seq[VertexID]): Result[T,AristoError] =
   ## Variant of `deblobify()` for deserialising the vertex ID generator state
   var vGen: seq[VertexID]
   ? data.deblobify vGen
-  ok vGen
+  ok move(vGen)
 
 proc deblobify*(data: Blob; filter: var FilterRef): Result[void,AristoError] =
   ## De-serialise an Aristo DB filter object
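The change above is the commit's main pattern: a locally built `Blob` is handed to the caller with `move` instead of being copied into the `Result`. A minimal standalone Nim sketch of the idea (the `serialise` proc and its contents are illustrative, not taken from the repository):

proc serialise(n: int): seq[byte] =
  ## Builds a buffer locally and hands its storage to the caller.
  var data: seq[byte]
  for i in 0 ..< n:
    data.add byte(i and 0xff)
  # `move(data)` transfers ownership of the backing storage into the return
  # value instead of leaving a potential copy behind.
  move(data)

doAssert serialise(4) == @[byte 0, 1, 2, 3]

With `--mm:arc`/`--mm:orc` the compiler can often sink such returns on its own, but spelling out `move` makes the transfer explicit, which is what this commit does for the `ok(...)` wrappers.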
2 changes: 1 addition & 1 deletion nimbus/db/aristo/aristo_hashify.nim
@@ -111,7 +111,7 @@ proc pedigree(
       wff.rev.withValue(toVid, val):
         val[].incl fromVid
       do:
-        wff.rev[toVid] = @[fromVid].toHashSet
+        wff.rev[toVid] = [fromVid].toHashSet
 
       # Remove unnecessarey sup-trie roots (e.g. for a storage root)
       wff.root.excl fromVid
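The `aristo_hashify.nim` change swaps `@[fromVid].toHashSet` for `[fromVid].toHashSet`: the array literal feeds `toHashSet` directly instead of allocating a temporary seq first. A small self-contained illustration (the values are arbitrary, not from the repository):

import std/sets

# Both lines build the same one-element set, but the array literal stays on
# the stack while `@[...]` allocates a throw-away heap seq first.
let viaArray = [42'u64].toHashSet
let viaSeq = @[42'u64].toHashSet
doAssert viaArray == viaSeq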
8 changes: 5 additions & 3 deletions nimbus/db/aristo/aristo_init/rocks_db/rdb_get.nim
@@ -47,7 +47,7 @@ proc getImpl(rdb: RdbInst; key: RdbKey): Result[Blob,(AristoError,string)] =
   # Correct result if needed
   if not gotData:
     res = EmptyBlob
-  ok res
+  ok move(res)
 
 # ------------------------------------------------------------------------------
 # Public functions
@@ -64,9 +64,10 @@ proc getKey*(rdb: var RdbInst; xid: uint64): Result[Blob,(AristoError,string)] =
   # Try LRU cache first
   let
     key = xid.toRdbKey KeyPfx
+  var
     rc = rdb.rdKeyLru.lruFetch(key)
   if rc.isOK:
-    return ok(rc.value)
+    return ok(move(rc.value))
 
   # Otherwise fetch from backend database
   let res = ? rdb.getImpl(key)
@@ -78,9 +79,10 @@ proc getVtx*(rdb: var RdbInst; xid: uint64): Result[Blob,(AristoError,string)] =
   # Try LRU cache first
   let
     key = xid.toRdbKey VtxPfx
+  var
     rc = rdb.rdVtxLru.lruFetch(key)
   if rc.isOK:
-    return ok(rc.value)
+    return ok(move(rc.value))
 
   # Otherwise fetch from backend database
   let res = ? rdb.getImpl(key)
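In `rdb_get.nim` the LRU lookup result moves from the `let` block into a `var` one so that `move(rc.value)` can take the cached blob's buffer instead of copying it. A minimal sketch of why the binding has to be mutable (the type and field names are made up for illustration):

type CachedEntry = object
  blob: seq[byte]

var hit = CachedEntry(blob: @[byte 1, 2, 3])
# `move` needs a mutable location: with `let hit` the call below would not
# compile. With `var`, the buffer is stolen and the source field left empty.
let payload = move(hit.blob)
doAssert payload == @[byte 1, 2, 3]
doAssert hit.blob.len == 0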
6 changes: 3 additions & 3 deletions nimbus/db/aristo/aristo_layers.nim
@@ -29,13 +29,13 @@ func dup(sTab: Table[VertexID,VertexRef]): Table[VertexID,VertexRef] =
 # Public getters: lazy value lookup for read only versions
 # ------------------------------------------------------------------------------
 
-func dirty*(db: AristoDbRef): HashSet[VertexID] =
+func dirty*(db: AristoDbRef): lent HashSet[VertexID] =
   db.top.final.dirty
 
-func pPrf*(db: AristoDbRef): HashSet[VertexID] =
+func pPrf*(db: AristoDbRef): lent HashSet[VertexID] =
   db.top.final.pPrf
 
-func vGen*(db: AristoDbRef): seq[VertexID] =
+func vGen*(db: AristoDbRef): lent seq[VertexID] =
   db.top.final.vGen
 
 # ------------------------------------------------------------------------------
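The getters in `aristo_layers.nim` now return `lent`, i.e. a borrowed view of the underlying container rather than a fresh copy per call. A standalone sketch of the same idea (the types here are illustrative, not the repository's):

type
  Layer = object
    vGen: seq[uint64]
  DbRef = ref object
    top: Layer

# `lent` returns a read-only borrow of the field; without it every call to
# the getter materialises a copy of the whole seq.
func vGen(db: DbRef): lent seq[uint64] =
  db.top.vGen

let db = DbRef(top: Layer(vGen: @[1'u64, 2, 3]))
doAssert db.vGen.len == 3   # reads through the borrow, no copy made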
4 changes: 2 additions & 2 deletions nimbus/db/kvt/kvt_init/memory_db.nim
@@ -77,9 +77,9 @@ proc getKvpFn(db: MemBackendRef): GetKvpFn =
     proc(key: openArray[byte]): Result[Blob,KvtError] =
       if key.len == 0:
         return err(KeyInvalid)
-      let data = db.mdb.tab.getOrVoid @key
+      var data = db.mdb.tab.getOrVoid @key
       if data.isValid:
-        return ok(data)
+        return ok(move(data))
       err(GetNotFound)
 
 # -------------
4 changes: 2 additions & 2 deletions nimbus/db/kvt/kvt_init/rocks_db.nim
@@ -84,14 +84,14 @@ proc getKvpFn(db: RdbBackendRef): GetKvpFn =
     proc(key: openArray[byte]): Result[Blob,KvtError] =
 
       # Get data record
-      let data = db.rdb.get(key).valueOr:
+      var data = db.rdb.get(key).valueOr:
         when extraTraceMessages:
           debug logTxt "getKvpFn() failed", key, error=error[0], info=error[1]
         return err(error[0])
 
       # Return if non-empty
       if 0 < data.len:
-        return ok(data)
+        return ok(move(data))
 
       err(GetNotFound)
 
2 changes: 1 addition & 1 deletion nimbus/db/kvt/kvt_init/rocks_db/rdb_get.nim
@@ -51,7 +51,7 @@ proc get*(
 
   if not gotData:
     res = EmptyBlob
-  ok res
+  ok move(res)
 
 # ------------------------------------------------------------------------------
 # End
4 changes: 2 additions & 2 deletions nimbus/db/kvt/kvt_utils.nim
@@ -90,10 +90,10 @@ proc get*(
   if key.len == 0:
     return err(KeyInvalid)
 
-  let data = db.layersGet(key).valueOr:
+  var data = db.layersGet(key).valueOr:
     return db.getBe key
 
-  return ok(data)
+  return ok(move(data))
 
 
 proc hasKey*(
4 changes: 2 additions & 2 deletions nimbus/db/ledger/distinct_ledgers.nim
@@ -198,10 +198,10 @@ proc init*(
   mpt.toPhk.T
 
 proc fetch*(sl: StorageLedger, slot: UInt256): Result[Blob,void] =
-  let rc = sl.distinctBase.fetch(slot.toBytesBE)
+  var rc = sl.distinctBase.fetch(slot.toBytesBE)
   if rc.isErr:
     return err()
-  ok rc.value
+  ok move(rc.value)
 
 proc merge*(sl: StorageLedger, slot: UInt256, value: openArray[byte]) =
   const info = "StorageLedger/merge(): "
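The `fetch*` change follows the same recipe with a `Result`: bind the intermediate result as `var`, then move its payload into the returned value. A sketch assuming the `results` package used throughout this code base (the proc names and values are made up):

import results

proc readSlot(present: bool): Result[seq[byte], void] =
  if present:
    ok @[byte 1, 2, 3]
  else:
    err()

proc fetchOnce(): Result[seq[byte], void] =
  # `var` rather than `let`: `move` needs a mutable location to steal from.
  var rc = readSlot(true)
  if rc.isErr:
    return err()
  ok move(rc.value)   # hands the buffer over without copying it

doAssert fetchOnce().value == @[byte 1, 2, 3]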
