
Feature: Add setOrGet Function to Cache Implementation #383

Open · wants to merge 12 commits into base: main
7 changes: 7 additions & 0 deletions bigcache.go
@@ -153,6 +153,13 @@ func (c *BigCache) Set(key string, entry []byte) error {
	return shard.set(key, hashedKey, entry)
}

// SetOrGet saves entry under the key unless the key already exists, in which
// case it returns the current value stored under the key.
func (c *BigCache) SetOrGet(key string, entry []byte) (actual []byte, loaded bool, err error) {
	hashedKey := c.hash.Sum64(key)
	shard := c.getShard(hashedKey)
	return shard.setOrGet(key, hashedKey, entry)
}

// Append appends entry under the key if key exists, otherwise
// it will set the key (same behaviour as Set()). With Append() you can
// concatenate multiple entries under the same key in a lock-optimized way.
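For reference, a minimal usage sketch of the new API on top of this branch; the key, value, and DefaultConfig settings are illustrative only, and the expected loaded values mirror the TestSetOrGet assertions further down in this diff:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/allegro/bigcache/v3"
)

func main() {
	cache, _ := bigcache.New(context.Background(), bigcache.DefaultConfig(10*time.Minute))

	// First call: the key is absent, so the entry is stored and loaded is false.
	actual, loaded, _ := cache.SetOrGet("session-42", []byte("first"))
	fmt.Println(string(actual), loaded) // first false

	// Second call: the key exists, so the stored value is returned and loaded is true.
	actual, loaded, _ = cache.SetOrGet("session-42", []byte("second"))
	fmt.Println(string(actual), loaded) // first true
}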
34 changes: 32 additions & 2 deletions bigcache_bench_test.go
@@ -14,7 +14,9 @@ var message = blob('a', 256)
func BenchmarkWriteToCacheWith1Shard(b *testing.B) {
	writeToCache(b, 1, 100*time.Second, b.N)
}

func BenchmarkWriteToCacheUsingSetOrGetWith1Shard(b *testing.B) {
	writeToCacheWithSetOrGet(b, 1, 100*time.Second, b.N)
}
func BenchmarkWriteToLimitedCacheWithSmallInitSizeAnd1Shard(b *testing.B) {
	m := blob('a', 1024)
	cache, _ := New(context.Background(), Config{
@@ -53,6 +55,13 @@ func BenchmarkWriteToCache(b *testing.B) {
		})
	}
}
func BenchmarkWriteToCacheUsingSetOrGet(b *testing.B) {
	for _, shards := range []int{1, 512, 1024, 8192} {
		b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) {
			writeToCacheWithSetOrGet(b, shards, 100*time.Second, b.N)
		})
	}
}
func BenchmarkAppendToCache(b *testing.B) {
	for _, shards := range []int{1, 512, 1024, 8192} {
		b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) {
@@ -112,7 +121,9 @@ func BenchmarkIterateOverCache(b *testing.B) {
func BenchmarkWriteToCacheWith1024ShardsAndSmallShardInitSize(b *testing.B) {
	writeToCache(b, 1024, 100*time.Second, 100)
}

func BenchmarkWriteUsingSetOrGetToCacheWith1024ShardsAndSmallShardInitSize(b *testing.B) {
	writeToCacheWithSetOrGet(b, 1024, 100*time.Second, 100)
}
func BenchmarkReadFromCacheNonExistentKeys(b *testing.B) {
	for _, shards := range []int{1, 512, 1024, 8192} {
		b.Run(fmt.Sprintf("%d-shards", shards), func(b *testing.B) {
@@ -142,6 +153,25 @@ func writeToCache(b *testing.B, shards int, lifeWindow time.Duration, requestsIn
	})
}

func writeToCacheWithSetOrGet(b *testing.B, shards int, lifeWindow time.Duration, requestsInLifeWindow int) {
	cache, _ := New(context.Background(), Config{
		Shards:             shards,
		LifeWindow:         lifeWindow,
		MaxEntriesInWindow: max(requestsInLifeWindow, 100),
		MaxEntrySize:       500,
	})
	rand.Seed(time.Now().Unix())

	b.RunParallel(func(pb *testing.PB) {
		id := rand.Int()

		b.ReportAllocs()
		for pb.Next() {
			_, _, _ = cache.SetOrGet(fmt.Sprintf("key-%d", id), message)
		}
	})
}

func appendToCache(b *testing.B, shards int, lifeWindow time.Duration, requestsInLifeWindow int) {
	cache, _ := New(context.Background(), Config{
		Shards: shards,
55 changes: 55 additions & 0 deletions bigcache_test.go
@@ -945,6 +945,61 @@ func TestEntryUpdate(t *testing.T) {
	assertEqual(t, []byte("value2"), cachedValue)
}

func TestSetOrGet(t *testing.T) {
	t.Parallel()

	// given
	clock := mockedClock{value: 0}
	cache, _ := newBigCache(context.Background(), Config{
		Shards:             1,
		LifeWindow:         6 * time.Second,
		MaxEntriesInWindow: 1,
		MaxEntrySize:       256,
	}, &clock)

	// when
	entry1, loaded1, _ := cache.SetOrGet("key1", []byte("value1"))
	entry2, loaded2, _ := cache.SetOrGet("key1", []byte("value2"))
	entry3, loaded3, _ := cache.SetOrGet("key2", []byte("value3"))

	cachedValue, _ := cache.Get("key1")

	// then
	assertEqual(t, []byte("value1"), entry1)
	assertEqual(t, []byte("value1"), entry2)
	assertEqual(t, []byte("value3"), entry3)
	assertEqual(t, []byte("value1"), cachedValue)
	assertEqual(t, false, loaded1)
	assertEqual(t, true, loaded2)
	assertEqual(t, false, loaded3)
}

func TestSetOrGetCollision(t *testing.T) {
	t.Parallel()

	// given
	cache, _ := New(context.Background(), Config{
		Shards:             1,
		LifeWindow:         5 * time.Second,
		MaxEntriesInWindow: 10,
		MaxEntrySize:       256,
		Verbose:            true,
		Hasher:             hashStub(5),
	})

	// when
	entry1, loaded1, _ := cache.SetOrGet("a", []byte("value1"))
	entry2, loaded2, _ := cache.SetOrGet("b", []byte("value2"))

	// then
	assertEqual(t, []byte("value1"), entry1)
	assertEqual(t, []byte("value2"), entry2)
	assertEqual(t, false, loaded1)
	assertEqual(t, false, loaded2)
	assertEqual(t, cache.Stats().Collisions, int64(1))
}

func TestOldestEntryDeletionWhenMaxCacheSizeIsReached(t *testing.T) {
	t.Parallel()

35 changes: 35 additions & 0 deletions shard.go
@@ -151,6 +151,41 @@ func (s *cacheShard) set(key string, hashedKey uint64, entry []byte) error {
}
}

func (s *cacheShard) setOrGet(key string, hashedKey uint64, entry []byte) (actual []byte, loaded bool, err error) {
	s.lock.RLock()

	wrappedEntry, err := s.getWrappedEntry(hashedKey)
	if err == nil {
		if entryKey := readKeyFromEntry(wrappedEntry); key == entryKey {
			actual = readEntry(wrappedEntry)
			s.hit(hashedKey)
			s.lock.RUnlock()
			return actual, true, nil
		} else {

			s.collision()
			if s.isVerbose {
				s.logger.Printf("Collision detected. Both %q and %q have the same hash %x", key, entryKey, hashedKey)
			}

			delete(s.hashmap, hashedKey)
			s.onRemove(wrappedEntry, Deleted)
			if s.statsEnabled {
				delete(s.hashmapStats, hashedKey)
			}
			resetHashFromEntry(wrappedEntry)
		}
	} else if !errors.Is(err, ErrEntryNotFound) {
		s.lock.RUnlock()
		return entry, false, err
	}

	s.lock.RUnlock()
	s.lock.Lock()
Collaborator:

I think this is problematic. We have a read lock and then we lock to insert something. Another goroutine might hijack our lock in the meantime.

Author:

Thanks for the answer. I read somewhere that these lock operations are atomic, so another goroutine should not be able to jump in between them, and with performance in mind I sent the code you have seen. The original code is below; it is more readable and, as you say, more robust, so I will update the request with it if you prefer.

The original code was:

func (s *cacheShard) setOrGet(key string, hashedKey uint64, entry []byte) (actual []byte, loaded bool, err error) {
	s.lock.Lock()
	defer s.lock.Unlock()

	wrappedEntry, err := s.getWrappedEntry(hashedKey)
	if err == nil {
		if entryKey := readKeyFromEntry(wrappedEntry); key == entryKey {
			actual = readEntry(wrappedEntry)
			s.hit(hashedKey)
			return actual, true, nil
		} else {

			s.collision()
			if s.isVerbose {
				s.logger.Printf("Collision detected. Both %q and %q have the same hash %x", key, entryKey, hashedKey)
			}

			delete(s.hashmap, hashedKey)
			s.onRemove(wrappedEntry, Deleted)
			if s.statsEnabled {
				delete(s.hashmapStats, hashedKey)
			}
			resetHashFromEntry(wrappedEntry)
		}
	} else if !errors.Is(err, ErrEntryNotFound) {
		return entry, false, err
	}

	return entry, false, s.addNewWithoutLock(key, hashedKey, entry)
}

	defer s.lock.Unlock()
	return entry, false, s.addNewWithoutLock(key, hashedKey, entry)
}

func (s *cacheShard) addNewWithoutLock(key string, hashedKey uint64, entry []byte) error {
	currentTimestamp := uint64(s.clock.Epoch())

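For illustration only, not part of this PR: one common way to close the window the reviewer points out, while keeping the read-lock fast path, is to re-check the entry after acquiring the write lock. The sketch below reuses the shard helpers visible in this diff (getWrappedEntry, readKeyFromEntry, readEntry, hit, addNewWithoutLock), but the function name is hypothetical and the collision-handling branch is omitted for brevity.

// Sketch only: double-checked variant of setOrGet. Between RUnlock and Lock,
// another goroutine may insert the key, so the entry is checked again under
// the write lock before a new value is added.
func (s *cacheShard) setOrGetDoubleChecked(key string, hashedKey uint64, entry []byte) (actual []byte, loaded bool, err error) {
	// Fast path: return the existing value under the read lock, as in the PR.
	s.lock.RLock()
	if wrapped, getErr := s.getWrappedEntry(hashedKey); getErr == nil && readKeyFromEntry(wrapped) == key {
		actual = readEntry(wrapped)
		s.hit(hashedKey)
		s.lock.RUnlock()
		return actual, true, nil
	}
	s.lock.RUnlock()

	// Slow path: take the write lock and check again before inserting.
	s.lock.Lock()
	defer s.lock.Unlock()
	if wrapped, getErr := s.getWrappedEntry(hashedKey); getErr == nil && readKeyFromEntry(wrapped) == key {
		s.hit(hashedKey)
		return readEntry(wrapped), true, nil
	}
	return entry, false, s.addNewWithoutLock(key, hashedKey, entry)
}

Whether the extra check is worth it compared with simply holding the write lock for the whole operation, as in the author's original version above, is exactly the trade-off discussed in this thread.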