Feature: Add setOrGet Function to Cache Implementation #383

Open · wants to merge 12 commits into main
7 changes: 7 additions & 0 deletions bigcache.go
@@ -153,6 +153,13 @@ func (c *BigCache) Set(key string, entry []byte) error {
return shard.set(key, hashedKey, entry)
}

// SetOrGet saves the entry under the key unless the key already exists, in which
// case the current value stored under the key is returned. The loaded result is
// true if the value was already present and false if it was stored by this call.
func (c *BigCache) SetOrGet(key string, entry []byte) (actual []byte, loaded bool, err error) {
hashedKey := c.hash.Sum64(key)
shard := c.getShard(hashedKey)
return shard.setOrGet(key, hashedKey, entry)
}

// Append appends entry under the key if key exists, otherwise
// it will set the key (same behaviour as Set()). With Append() you can
// concatenate multiple entries under the same key in an lock optimized way.
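For reference, a minimal usage sketch of the new call (assuming the v3 module path and DefaultConfig; the key and values here are illustrative, not part of this diff):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/allegro/bigcache/v3"
)

func main() {
	cache, _ := bigcache.New(context.Background(), bigcache.DefaultConfig(10*time.Minute))

	// First call: the key is absent, so the value is stored and loaded is false.
	actual, loaded, _ := cache.SetOrGet("session-42", []byte("first"))
	fmt.Println(string(actual), loaded) // first false

	// Second call: the key already exists, so the stored value is returned and loaded is true.
	actual, loaded, _ = cache.SetOrGet("session-42", []byte("second"))
	fmt.Println(string(actual), loaded) // first true
}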
34 changes: 32 additions & 2 deletions bigcache_bench_test.go
@@ -14,7 +14,9 @@ var message = blob('a', 256)
func BenchmarkWriteToCacheWith1Shard(b *testing.B) {
writeToCache(b, 1, 100*time.Second, b.N)
}

func BenchmarkWriteToCacheUsingSetOrGetWith1Shard(b *testing.B) {
writeToCacheWithSetOrGet(b, 1, 100*time.Second, b.N)
}
func BenchmarkWriteToLimitedCacheWithSmallInitSizeAnd1Shard(b *testing.B) {
m := blob('a', 1024)
cache, _ := New(context.Background(), Config{
@@ -53,6 +55,13 @@ func BenchmarkWriteToCache(b *testing.B) {
})
}
}
func BenchmarkWriteToCacheUsingSetOrGet(b *testing.B) {
for _, shards := range []int{1, 512, 1024, 8192} {
b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) {
writeToCacheWithSetOrGet(b, shards, 100*time.Second, b.N)
})
}
}
func BenchmarkAppendToCache(b *testing.B) {
for _, shards := range []int{1, 512, 1024, 8192} {
b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) {
@@ -112,7 +121,9 @@ func BenchmarkIterateOverCache(b *testing.B) {
func BenchmarkWriteToCacheWith1024ShardsAndSmallShardInitSize(b *testing.B) {
writeToCache(b, 1024, 100*time.Second, 100)
}

func BenchmarkWriteUsingSetOrGetToCacheWith1024ShardsAndSmallShardInitSize(b *testing.B) {
writeToCacheWithSetOrGet(b, 1024, 100*time.Second, 100)
}
func BenchmarkReadFromCacheNonExistentKeys(b *testing.B) {
for _, shards := range []int{1, 512, 1024, 8192} {
b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) {
@@ -142,6 +153,25 @@ func writeToCache(b *testing.B, shards int, lifeWindow time.Duration, requestsInLifeWindow int) {
})
}

func writeToCacheWithSetOrGet(b *testing.B, shards int, lifeWindow time.Duration, requestsInLifeWindow int) {
cache, _ := New(context.Background(), Config{
Shards: shards,
LifeWindow: lifeWindow,
MaxEntriesInWindow: max(requestsInLifeWindow, 100),
MaxEntrySize: 500,
})
rand.Seed(time.Now().Unix())

b.RunParallel(func(pb *testing.PB) {
id := rand.Int()
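// Each goroutine repeatedly calls SetOrGet on its own key, so after the first
// iteration the existing value is returned and loaded is true.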

b.ReportAllocs()
for pb.Next() {
_, _, _ = cache.SetOrGet(fmt.Sprintf("key-%d", id), message)
}
})
}

func appendToCache(b *testing.B, shards int, lifeWindow time.Duration, requestsInLifeWindow int) {
cache, _ := New(context.Background(), Config{
Shards: shards,
77 changes: 77 additions & 0 deletions bigcache_test.go
@@ -945,6 +945,83 @@ func TestEntryUpdate(t *testing.T) {
assertEqual(t, []byte("value2"), cachedValue)
}

func TestSetOrGet(t *testing.T) {
t.Parallel()

// given
clock := mockedClock{value: 0}
cache, _ := newBigCache(context.Background(), Config{
Shards: 1,
LifeWindow: 6 * time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 256,
}, &clock)

// when
entry1, loaded1, _ := cache.SetOrGet("key1", []byte("value1"))
entry2, loaded2, _ := cache.SetOrGet("key1", []byte("value2"))
entry3, loaded3, _ := cache.SetOrGet("key2", []byte("value3"))

cachedValue, _ := cache.Get("key1")

// then
assertEqual(t, []byte("value1"), entry1)
assertEqual(t, []byte("value1"), entry2)
assertEqual(t, []byte("value3"), entry3)
assertEqual(t, []byte("value1"), cachedValue)
assertEqual(t, false, loaded1)
assertEqual(t, true, loaded2)
assertEqual(t, false, loaded3)
}

func TestSetOrGetCollision(t *testing.T) {
t.Parallel()

// given
cache, _ := New(context.Background(), Config{
Shards: 1,
LifeWindow: 5 * time.Second,
MaxEntriesInWindow: 10,
MaxEntrySize: 256,
Verbose: true,
Hasher: hashStub(5),
})

// when
entry1, loaded1, _ := cache.SetOrGet("a", []byte("value1"))
entry2, loaded2, _ := cache.SetOrGet("b", []byte("value2"))

// then
assertEqual(t, []byte("value1"), entry1)
assertEqual(t, []byte("value2"), entry2)
assertEqual(t, false, loaded1)
assertEqual(t, false, loaded2)
assertEqual(t, int64(1), cache.Stats().Collisions)
}

func TestSetOrGetErrorBiggerThanShardSize(t *testing.T) {
t.Parallel()

// given
cache, _ := New(context.Background(), Config{
Shards: 1,
LifeWindow: 5 * time.Second,
MaxEntriesInWindow: 1,
MaxEntrySize: 1,
HardMaxCacheSize: 1,
})

// when
entry, loaded, err := cache.SetOrGet("key1", blob('a', 1024*1025))

// then
assertEqual(t, blob('a', 1024*1025), entry)
assertEqual(t, false, loaded)
assertEqual(t, "entry is bigger than max shard size", err.Error())

}

func TestOldestEntryDeletionWhenMaxCacheSizeIsReached(t *testing.T) {
t.Parallel()

31 changes: 31 additions & 0 deletions shard.go
@@ -151,6 +151,37 @@ func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error {
}
}

func (s *cacheShard) setOrGet(key string, hashedKey uint64, entry []byte) (actual []byte, loaded bool, err error) {
s.lock.Lock()
defer s.lock.Unlock()

wrappedEntry, err := s.getWrappedEntry(hashedKey)
if err == nil {
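// An entry already exists under this hash: if the stored key matches, return
// the current value; otherwise it is a hash collision and the stale entry is
// removed before the new one is stored.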
if entryKey := readKeyFromEntry(wrappedEntry); key == entryKey {
actual = readEntry(wrappedEntry)
s.hit(hashedKey)
return actual, true, nil
} else {
s.collision()
if s.isVerbose {
s.logger.Printf("Collision detected. Both %q and %q have the same hash %x", key, entryKey, hashedKey)
}

delete(s.hashmap, hashedKey)
s.onRemove(wrappedEntry, Deleted)
if s.statsEnabled {
delete(s.hashmapStats, hashedKey)
}
resetHashFromEntry(wrappedEntry)
}
} else if !errors.Is(err, ErrEntryNotFound) {
return entry, false, err
}

// Key not found (or the colliding entry was removed above): store the new
// entry and return it with loaded set to false.
return entry, false, s.addNewWithoutLock(key, hashedKey, entry)
}

func (s *cacheShard) addNewWithoutLock(key string, hashedKey uint64, entry []byte) error {
currentTimestamp := uint64(s.clock.Epoch())
