mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2024-11-23 20:37:12 +01:00
0f24078146
These caches aren't expected to grow big, so it is OK to use the simplest cache based on sync.Map. The benefit of this cache compared to workingsetcache is better scalability on systems with many CPU cores, since it doesn't use mutexes on the fast path. An additional benefit is lower memory usage on average, since the size of the in-memory cache equals the working set for the last 3 minutes. The downside is that there is no upper bound for the cache size, so it may grow big during workload spikes. But this is very unlikely for typical workloads.
60 lines
1.2 KiB
Go
60 lines
1.2 KiB
Go
package logstorage
|
|
|
|
import (
|
|
"fmt"
|
|
"testing"
|
|
)
|
|
|
|
func TestCache(t *testing.T) {
|
|
m := make(map[string]int)
|
|
for i := 0; i < 10; i++ {
|
|
k := fmt.Sprintf("key_%d", i)
|
|
m[k] = i
|
|
}
|
|
|
|
c := newCache()
|
|
defer c.MustStop()
|
|
|
|
for kStr := range m {
|
|
k := []byte(kStr)
|
|
|
|
if v, ok := c.Get(k); ok {
|
|
t.Fatalf("unexpected value obtained from the cache for key %q: %v", k, v)
|
|
}
|
|
c.Set(k, m[kStr])
|
|
v, ok := c.Get(k)
|
|
if !ok {
|
|
t.Fatalf("cannot obtain value for key %q", k)
|
|
}
|
|
if n := v.(int); n != m[kStr] {
|
|
t.Fatalf("unexpected value obtained for key %q; got %d; want %d", k, n, m[kStr])
|
|
}
|
|
}
|
|
|
|
// The cached entries should be still visible after a single clean() call.
|
|
c.clean()
|
|
for kStr := range m {
|
|
k := []byte(kStr)
|
|
|
|
v, ok := c.Get(k)
|
|
if !ok {
|
|
t.Fatalf("cannot obtain value for key %q", k)
|
|
}
|
|
if n := v.(int); n != m[kStr] {
|
|
t.Fatalf("unexpected value obtained for key %q; got %d; want %d", k, n, m[kStr])
|
|
}
|
|
}
|
|
|
|
// The cached entries must be dropped after two clean() calls.
|
|
c.clean()
|
|
c.clean()
|
|
|
|
for kStr := range m {
|
|
k := []byte(kStr)
|
|
|
|
if v, ok := c.Get(k); ok {
|
|
t.Fatalf("unexpected value obtained from the cache for key %q: %v", k, v)
|
|
}
|
|
}
|
|
}
|