Get multiple cached values #379

Open
wants to merge 18 commits into base: main
go fmt
raeidish committed Oct 18, 2023
commit ee86c5d94bca7fd80afa275b856aad2fb35e7165
22 changes: 11 additions & 11 deletions bigcache.go
@@ -150,7 +150,7 @@ type keyInfo struct {
  // GetMulti reads entry for each of the keys.
  // returns entries in the same order as the provided keys.
  // if entry is not found for a given key, the index will contain nil
- func (c *BigCache) GetMulti(keys []string) ([][]byte) {
+ func (c *BigCache) GetMulti(keys []string) [][]byte {
      shards := make(map[uint64][]keyInfo, len(c.shards))
      entries := make([][]byte, len(keys))

@@ -161,23 +161,23 @@ func (c *BigCache) GetMulti(keys []string) ([][]byte) {
      }

      for shardKey, keyInfos := range shards {
-         hits := make([]uint64,0,len(keyInfos))
+         hits := make([]uint64, 0, len(keyInfos))
          shard := c.shards[shardKey]
          shard.lock.RLock()
          for i := range keyInfos {
-             entry,_ := shard.getWithoutLock(keyInfos[i].key, keyInfos[i].hashedKey)
+             entry, _ := shard.getWithoutLock(keyInfos[i].key, keyInfos[i].hashedKey)

-             if entry != nil {
-                 hits = append(hits, keyInfos[i].hashedKey)
-             }
+             if entry != nil {
+                 hits = append(hits, keyInfos[i].hashedKey)
+             }

-             entries[keyInfos[i].order] = entry
+             entries[keyInfos[i].order] = entry
          }
-         shard.lock.RUnlock()
+         shard.lock.RUnlock()

-         for i := range hits{
-             shard.hit(hits[i])
-         }
+         for i := range hits {
+             shard.hit(hits[i])
+         }
      }
      return entries
  }
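For readers skimming the diff, here is a minimal usage sketch of the GetMulti API proposed in this PR. Only the GetMulti signature comes from the diff above; New, DefaultConfig, and Set are existing bigcache APIs, and the import path github.com/allegro/bigcache/v3, the keys, and the values are illustrative assumptions.

package main

import (
    "context"
    "fmt"
    "time"

    "github.com/allegro/bigcache/v3"
)

func main() {
    // Standard bigcache setup; GetMulti is the batch read added by this PR.
    cache, _ := bigcache.New(context.Background(), bigcache.DefaultConfig(10*time.Minute))
    _ = cache.Set("k1", []byte("v1"))
    _ = cache.Set("k2", []byte("v2"))

    // Entries come back in the same order as the keys; a key with no
    // entry ("k3" here) yields nil at its index instead of an error.
    for i, entry := range cache.GetMulti([]string{"k1", "k2", "k3"}) {
        fmt.Printf("key %d -> %q\n", i, entry)
    }
}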
96 changes: 48 additions & 48 deletions bigcache_test.go
@@ -31,49 +31,49 @@ func TestWriteAndGetOnCache(t *testing.T) {

  func TestWriteAndGetOnCacheMulti(t *testing.T) {
      t.Parallel()
-     for _, tc := range []struct {
-         keys []string
-         data [][]byte
-         want string
-     }{
-         {
-             keys: []string{"k1","k2","k3","k4","k5"},
-             data: [][]byte{
-                 blob('a',10),
-                 blob('b',10),
-                 blob('c',10),
-                 blob('d',10),
-                 blob('e',10),
-             },
-             want: "Get all values ordered",
-         },
-         {
-             keys: []string{"k1","k2","k3","k4","k5"},
-             data: [][]byte{
-                 blob('a',10),
-                 blob('b',10),
-                 nil,
-                 blob('d',10),
-                 blob('e',10),
-             },
-             want: "Get all values ordered with nil",
-         },
-     }{
+     for _, tc := range []struct {
+         keys []string
+         data [][]byte
+         want string
+     }{
+         {
+             keys: []string{"k1", "k2", "k3", "k4", "k5"},
+             data: [][]byte{
+                 blob('a', 10),
+                 blob('b', 10),
+                 blob('c', 10),
+                 blob('d', 10),
+                 blob('e', 10),
+             },
+             want: "Get all values ordered",
+         },
+         {
+             keys: []string{"k1", "k2", "k3", "k4", "k5"},
+             data: [][]byte{
+                 blob('a', 10),
+                 blob('b', 10),
+                 nil,
+                 blob('d', 10),
+                 blob('e', 10),
+             },
+             want: "Get all values ordered with nil",
+         },
+     } {
          t.Run(tc.want, func(t *testing.T) {
-             cache, _ := New(context.Background(), DefaultConfig(5*time.Second))
+             cache, _ := New(context.Background(), DefaultConfig(5*time.Second))

-             for i := range tc.keys{
-                 if tc.data[i] != nil{
-                     cache.Set(tc.keys[i],tc.data[i])
-                 }
-             }
+             for i := range tc.keys {
+                 if tc.data[i] != nil {
+                     cache.Set(tc.keys[i], tc.data[i])
+                 }
+             }

-             cachedValues := cache.GetMulti(tc.keys)
+             cachedValues := cache.GetMulti(tc.keys)

-             assertEqual(t,tc.data,cachedValues)
-         })
+             assertEqual(t, tc.data, cachedValues)
+         })

-     }
+     }
  }

func TestAppendAndGetOnCache(t *testing.T) {
@@ -894,14 +894,14 @@ func TestWriteAndReadManyParallelSameKeyWithStats(t *testing.T) {
      ntest := 1000
      n := 10
      wg.Add(n)
-     keys := []string{"key1","key2","key3"}
-     values := [][]byte{blob('a', 1024),blob('b',1024),blob('c',1024)}
-
+     keys := []string{"key1", "key2", "key3"}
+     values := [][]byte{blob('a', 1024), blob('b', 1024), blob('c', 1024)}
+
      for i := 0; i < ntest; i++ {
-         for j := range keys{
-             assertEqual(t, nil, cache.Set(keys[j], values[j]))
-         }
+         for j := range keys {
+             assertEqual(t, nil, cache.Set(keys[j], values[j]))
+         }
      }

      for j := 0; j < n; j++ {
@@ -916,11 +916,11 @@ func TestWriteAndReadManyParallelSameKeyWithStats(t *testing.T) {

      wg.Wait()

-     assertEqual(t, Stats{Hits: int64(n * ntest* len(keys))}, cache.Stats())
+     assertEqual(t, Stats{Hits: int64(n * ntest * len(keys))}, cache.Stats())

-     for i := range keys{
-         assertEqual(t, ntest*n, int(cache.KeyMetadata(keys[i]).RequestCount))
-     }
+     for i := range keys {
+         assertEqual(t, ntest*n, int(cache.KeyMetadata(keys[i]).RequestCount))
+     }
  }

func TestCacheReset(t *testing.T) {
11 changes: 5 additions & 6 deletions shard.go
@@ -61,14 +61,14 @@ func (s *cacheShard) getWithInfo(key string, hashedKey uint64) (entry []byte, re

  func (s *cacheShard) get(key string, hashedKey uint64) ([]byte, error) {
      s.lock.RLock()
-     entry,err := s.getWithoutLock(key,hashedKey)
+     entry, err := s.getWithoutLock(key, hashedKey)
      s.lock.RUnlock()

-     if err != nil{
-         return nil,err
-     }
+     if err != nil {
+         return nil, err
+     }

-     s.hit(hashedKey)
+     s.hit(hashedKey)
      return entry, nil
  }

@@ -87,7 +87,6 @@ func (s *cacheShard) getWithoutLock(key string, hashedKey uint64) ([]byte, error
      }

      entry := readEntry(wrappedEntry)
-

      return entry, nil
  }
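One design note on the get refactor above and on GetMulti earlier in the diff: hits are recorded only after the shard's read lock has been released. Presumably this is because shard.hit takes the shard's write lock when per-key stats are enabled, and Go's sync.RWMutex cannot be upgraded, so calling it while the same goroutine still holds RLock would block forever. A minimal sketch of that hazard with a plain sync.RWMutex (names are illustrative, not bigcache code):

package main

import "sync"

func main() {
    var mu sync.RWMutex

    mu.RLock()
    // mu.Lock() here would deadlock: RWMutex has no upgrade path, so the
    // write lock waits on the read lock this same goroutine still holds.
    mu.RUnlock()

    // Safe ordering, mirroring get/GetMulti: release the read lock first,
    // then take the write path (here, the write lock) to record the hit.
    mu.Lock()
    mu.Unlock()
}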