diff --git a/config/config.go b/config/config.go index e866aeab9..e23ad1f71 100644 --- a/config/config.go +++ b/config/config.go @@ -22,12 +22,10 @@ const ( DefaultConfigName string = "dice.toml" DefaultConfigFilePath string = "./" - EvictSimpleFirst = "simple-first" - EvictAllKeysRandom = "allkeys-random" - EvictAllKeysLRU = "allkeys-lru" - EvictAllKeysLFU = "allkeys-lfu" + EvictBatchKeysLRU string = "batch_keys_lru" - DefaultKeysLimit int = 200000000 + DefaultKeysLimit int = 200000000 + DefaultEvictionRatio float64 = 0.1 ) var ( @@ -50,7 +48,8 @@ var ( InitConfigCmd = false - KeysLimit = DefaultKeysLimit + KeysLimit = DefaultKeysLimit + EvictionRatio = DefaultEvictionRatio EnableProfiling = false @@ -97,11 +96,11 @@ type Config struct { } `mapstructure:"performance"` Memory struct { - MaxMemory int64 `mapstructure:"maxmemory"` - EvictionPolicy string `mapstructure:"evictionpolicy"` - EvictionRatio float64 `mapstructure:"evictionratio"` - KeysLimit int `mapstructure:"keyslimit"` - LFULogFactor int `mapstructure:"lfulogfactor"` + MaxMemory int64 `mapstructure:"maxmemory"` + EvictionStrategy string `mapstructure:"evictionstrategy"` + EvictionRatio float64 `mapstructure:"evictionratio"` + KeysLimit int `mapstructure:"keyslimit"` + LFULogFactor int `mapstructure:"lfulogfactor"` } `mapstructure:"memory"` Persistence struct { @@ -181,17 +180,17 @@ var baseConfig = Config{ AdhocReqChanBufSize: 20, // assuming we wouldn't have more than 20 adhoc requests being sent at a time. }, Memory: struct { - MaxMemory int64 `mapstructure:"maxmemory"` - EvictionPolicy string `mapstructure:"evictionpolicy"` - EvictionRatio float64 `mapstructure:"evictionratio"` - KeysLimit int `mapstructure:"keyslimit"` - LFULogFactor int `mapstructure:"lfulogfactor"` + MaxMemory int64 `mapstructure:"maxmemory"` + EvictionStrategy string `mapstructure:"evictionstrategy"` + EvictionRatio float64 `mapstructure:"evictionratio"` + KeysLimit int `mapstructure:"keyslimit"` + LFULogFactor int `mapstructure:"lfulogfactor"` }{ - MaxMemory: 0, - EvictionPolicy: EvictAllKeysLFU, - EvictionRatio: 0.9, - KeysLimit: DefaultKeysLimit, - LFULogFactor: 10, + MaxMemory: 0, + EvictionStrategy: EvictBatchKeysLRU, + EvictionRatio: DefaultEvictionRatio, + KeysLimit: DefaultKeysLimit, + LFULogFactor: 10, }, Persistence: struct { AOFFile string `mapstructure:"aoffile"` @@ -366,6 +365,10 @@ func mergeFlagsWithConfig() { if KeysLimit != DefaultKeysLimit { DiceConfig.Memory.KeysLimit = KeysLimit } + + if EvictionRatio != DefaultEvictionRatio { + DiceConfig.Memory.EvictionRatio = EvictionRatio + } } // This function checks if the config file is present or not at default location or at -c flag location diff --git a/dice.toml b/dice.toml index 89d7a3575..02f330fc8 100644 --- a/dice.toml +++ b/dice.toml @@ -35,7 +35,7 @@ AdhocReqChanBufSize = 20 [Memory] MaxMemory = 0 EvictionPolicy = 'allkeys-lfu' -EvictionRatio = 0.9 +EvictionRatio = 0.1 KeysLimit = 200000000 LFULogFactor = 10 diff --git a/internal/eval/bloom_test.go b/internal/eval/bloom_test.go index 8e027eaa4..3d06c3daa 100644 --- a/internal/eval/bloom_test.go +++ b/internal/eval/bloom_test.go @@ -13,7 +13,7 @@ import ( ) func TestBloomFilter(t *testing.T) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) // This test only contains some basic checks for all the bloom filter // operations like BFRESERVE, BFADD, BFEXISTS. 
It assumes that the // functions called in the main function are working correctly and @@ -94,7 +94,7 @@ func TestBloomFilter(t *testing.T) { } func TestGetOrCreateBloomFilter(t *testing.T) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) // Create a key and default opts key := "bf" opts := defaultBloomOpts() diff --git a/internal/eval/commands.go b/internal/eval/commands.go index 76dcd733f..45a26e6c3 100644 --- a/internal/eval/commands.go +++ b/internal/eval/commands.go @@ -453,13 +453,6 @@ var ( Eval: evalLATENCY, Arity: -2, } - lruCmdMeta = DiceCmdMeta{ - Name: "LRU", - Info: `LRU deletes all the keys from the LRU - returns encoded RESP OK`, - Eval: evalLRU, - Arity: 1, - } sleepCmdMeta = DiceCmdMeta{ Name: "SLEEP", Info: `SLEEP sets db to sleep for the specified number of seconds. @@ -1493,7 +1486,6 @@ func init() { DiceCmds["LLEN"] = llenCmdMeta DiceCmds["LPOP"] = lpopCmdMeta DiceCmds["LPUSH"] = lpushCmdMeta - DiceCmds["LRU"] = lruCmdMeta DiceCmds["MGET"] = MGetCmdMeta DiceCmds["MSET"] = msetCmdMeta DiceCmds["OBJECT"] = objectCmdMeta diff --git a/internal/eval/countminsketch_test.go b/internal/eval/countminsketch_test.go index 644b5345c..6c823f655 100644 --- a/internal/eval/countminsketch_test.go +++ b/internal/eval/countminsketch_test.go @@ -9,7 +9,7 @@ import ( ) func TestCountMinSketch(t *testing.T) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) testCMSInitByDim(t, store) testCMSInitByProb(t, store) diff --git a/internal/eval/eval.go b/internal/eval/eval.go index f9cb687b0..1092fdf62 100644 --- a/internal/eval/eval.go +++ b/internal/eval/eval.go @@ -576,13 +576,6 @@ func evalLATENCY(args []string, store *dstore.Store) []byte { return clientio.Encode([]string{}, false) } -// evalLRU deletes all the keys from the LRU -// returns encoded RESP OK -func evalLRU(args []string, store *dstore.Store) []byte { - dstore.EvictAllkeysLRUOrLFU(store) - return clientio.RespOK -} - // evalSLEEP sets db to sleep for the specified number of seconds. // The sleep time should be the only param in args. // Returns error response if the time param in args is not of integer format. 
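Most of the remaining changes update call sites to the new three-argument `dstore.NewStore` signature: tests pass `nil` for the eviction strategy and fall back to the default batch-LRU eviction, while the SQL benchmarks construct the strategy explicitly. A minimal sketch of both forms, using only constructors that appear in this diff (the wrapping package and function are illustrative, not part of the patch):

```go
package example // illustrative wrapper, not part of the patch

import (
	"github.com/dicedb/dice/config"
	dstore "github.com/dicedb/dice/internal/store"
)

func newStores() (*dstore.Store, *dstore.Store) {
	// Passing nil as the third argument lets NewStore fall back to
	// NewDefaultEviction (batch LRU with DefaultKeysLimit / DefaultEvictionRatio),
	// which is what the test files below rely on.
	withDefault := dstore.NewStore(nil, nil, nil)

	// Constructing the strategy explicitly, as the SQL benchmarks do.
	withExplicit := dstore.NewStore(nil, nil,
		dstore.NewBatchEvictionLRU(config.DefaultKeysLimit, config.DefaultEvictionRatio))

	return withDefault, withExplicit
}
```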
diff --git a/internal/eval/eval_test.go b/internal/eval/eval_test.go index b80345d0f..f1deb51d1 100644 --- a/internal/eval/eval_test.go +++ b/internal/eval/eval_test.go @@ -12,17 +12,15 @@ import ( "testing" "time" - "github.com/dicedb/dice/internal/eval/sortedset" - "github.com/dicedb/dice/internal/server/utils" - - "github.com/bytedance/sonic" - "github.com/ohler55/ojg/jp" - "github.com/axiomhq/hyperloglog" + "github.com/bytedance/sonic" "github.com/dicedb/dice/internal/clientio" diceerrors "github.com/dicedb/dice/internal/errors" + "github.com/dicedb/dice/internal/eval/sortedset" "github.com/dicedb/dice/internal/object" + "github.com/dicedb/dice/internal/server/utils" dstore "github.com/dicedb/dice/internal/store" + "github.com/ohler55/ojg/jp" "github.com/stretchr/testify/assert" ) @@ -42,7 +40,7 @@ func setupTest(store *dstore.Store) *dstore.Store { } func TestEval(t *testing.T) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) testEvalMSET(t, store) testEvalECHO(t, store) @@ -1572,7 +1570,7 @@ func testEvalJSONOBJLEN(t *testing.T, store *dstore.Store) { func BenchmarkEvalJSONOBJLEN(b *testing.B) { sizes := []int{0, 10, 100, 1000, 10000, 100000} // Various sizes of JSON objects - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) for _, size := range sizes { b.Run(fmt.Sprintf("JSONObjectSize_%d", size), func(b *testing.B) { @@ -4367,13 +4365,13 @@ func runMigratedEvalTests(t *testing.T, tests map[string]evalTestCase, evalFunc func BenchmarkEvalMSET(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) evalMSET([]string{"KEY", "VAL", "KEY2", "VAL2"}, store) } } func BenchmarkEvalHSET(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) for i := 0; i < b.N; i++ { evalHSET([]string{"KEY", fmt.Sprintf("FIELD_%d", i), fmt.Sprintf("VALUE_%d", i)}, store) } @@ -4666,7 +4664,7 @@ func testEvalHKEYS(t *testing.T, store *dstore.Store) { } func BenchmarkEvalHKEYS(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) for i := 0; i < b.N; i++ { evalHSET([]string{"KEY", fmt.Sprintf("FIELD_%d", i), fmt.Sprintf("VALUE_%d", i)}, store) @@ -4678,7 +4676,7 @@ func BenchmarkEvalHKEYS(b *testing.B) { } func BenchmarkEvalPFCOUNT(b *testing.B) { - store := *dstore.NewStore(nil, nil) + store := *dstore.NewStore(nil, nil, nil) // Helper function to create and insert HLL objects createAndInsertHLL := func(key string, items []string) { @@ -5025,7 +5023,7 @@ func testEvalHLEN(t *testing.T, store *dstore.Store) { func BenchmarkEvalHLEN(b *testing.B) { sizes := []int{0, 10, 100, 1000, 10000, 100000} - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) for _, size := range sizes { b.Run(fmt.Sprintf("HashSize_%d", size), func(b *testing.B) { @@ -5337,7 +5335,7 @@ func testEvalTYPE(t *testing.T, store *dstore.Store) { } func BenchmarkEvalTYPE(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) // Define different types of objects to benchmark objectTypes := map[string]func(){ @@ -5681,7 +5679,7 @@ func testEvalJSONOBJKEYS(t *testing.T, store *dstore.Store) { func BenchmarkEvalJSONOBJKEYS(b *testing.B) { sizes := []int{0, 10, 100, 1000, 10000, 100000} // Various sizes of JSON objects - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) for _, size := range sizes { b.Run(fmt.Sprintf("JSONObjectSize_%d", 
size), func(b *testing.B) { @@ -5939,7 +5937,7 @@ func testEvalGETRANGE(t *testing.T, store *dstore.Store) { } func BenchmarkEvalGETRANGE(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) store.Put("BENCHMARK_KEY", store.NewObj("Hello World", maxExDuration, object.ObjTypeString, object.ObjEncodingRaw)) inputs := []struct { @@ -5964,7 +5962,7 @@ func BenchmarkEvalGETRANGE(b *testing.B) { } func BenchmarkEvalHSETNX(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) for i := 0; i < b.N; i++ { evalHSETNX([]string{"KEY", fmt.Sprintf("FIELD_%d", i/2), fmt.Sprintf("VALUE_%d", i)}, store) } @@ -6067,7 +6065,7 @@ func testEvalHSETNX(t *testing.T, store *dstore.Store) { runMigratedEvalTests(t, tests, evalHSETNX, store) } func TestMSETConsistency(t *testing.T) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) evalMSET([]string{"KEY", "VAL", "KEY2", "VAL2"}, store) assert.Equal(t, "VAL", store.Get("KEY").Value) @@ -6075,7 +6073,7 @@ func TestMSETConsistency(t *testing.T) { } func BenchmarkEvalHINCRBY(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) // creating new fields for i := 0; i < b.N; i++ { @@ -6327,7 +6325,7 @@ func testEvalSETEX(t *testing.T, store *dstore.Store) { } func BenchmarkEvalSETEX(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) b.ResetTimer() for i := 0; i < b.N; i++ { @@ -6506,7 +6504,7 @@ func testEvalINCRBYFLOAT(t *testing.T, store *dstore.Store) { } func BenchmarkEvalINCRBYFLOAT(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) store.Put("key1", store.NewObj("1", maxExDuration, object.ObjTypeString, object.ObjEncodingEmbStr)) store.Put("key2", store.NewObj("1.2", maxExDuration, object.ObjTypeString, object.ObjEncodingEmbStr)) @@ -6745,7 +6743,7 @@ func testEvalBITOP(t *testing.T, store *dstore.Store) { } func BenchmarkEvalBITOP(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) // Setup initial data for benchmarking store.Put("key1", store.NewObj(&ByteArray{data: []byte{0x01, 0x02, 0xff}}, maxExDuration, object.ObjTypeByteArray, object.ObjEncodingByteArray)) @@ -7059,7 +7057,7 @@ func testEvalAPPEND(t *testing.T, store *dstore.Store) { } func BenchmarkEvalAPPEND(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) for i := 0; i < b.N; i++ { evalAPPEND([]string{"key", fmt.Sprintf("val_%d", i)}, store) } @@ -7787,7 +7785,7 @@ func BenchmarkEvalZPOPMIN(b *testing.B) { }, } - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) for _, bm := range benchmarks { b.Run(bm.name, func(b *testing.B) { @@ -7953,7 +7951,7 @@ func testEvalZREM(t *testing.T, store *dstore.Store) { } func BenchmarkEvalZRANK(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) // Set up initial sorted set evalZADD([]string{"myzset", "1", "member1", "2", "member2", "3", "member3"}, store) @@ -8235,7 +8233,7 @@ func testEvalHINCRBYFLOAT(t *testing.T, store *dstore.Store) { } func BenchmarkEvalHINCRBYFLOAT(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) // Setting initial fields with some values store.Put("key1", store.NewObj(HashMap{"field1": "1.0", "field2": "1.2"}, maxExDuration, object.ObjTypeHashMap, object.ObjEncodingHashMap)) @@ -8619,7 +8617,7 @@ func 
testEvalJSONSTRAPPEND(t *testing.T, store *dstore.Store) { } func BenchmarkEvalJSONSTRAPPEND(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) // Setup a sample JSON document key := "doc1" @@ -8743,7 +8741,7 @@ func BenchmarkEvalZPOPMAX(b *testing.B) { }, } - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) for _, bm := range benchmarks { b.Run(bm.name, func(b *testing.B) { @@ -8759,7 +8757,7 @@ func BenchmarkEvalZPOPMAX(b *testing.B) { } } func BenchmarkZCOUNT(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) // Populate the sorted set with some members for basic benchmarks evalZADD([]string{"key", "10", "member1", "20", "member2", "30", "member3"}, store) @@ -8788,7 +8786,7 @@ func BenchmarkZCOUNT(b *testing.B) { // Benchmark for edge cases b.Run("Edge Case ZCOUNT", func(b *testing.B) { // Reset the store and set up members - store = dstore.NewStore(nil, nil) + store = dstore.NewStore(nil, nil, nil) evalZADD([]string{"key", "5", "member1", "15", "member2", "25", "member3"}, store) b.ResetTimer() diff --git a/internal/eval/hmap_test.go b/internal/eval/hmap_test.go index 62acca4ad..40fcc8005 100644 --- a/internal/eval/hmap_test.go +++ b/internal/eval/hmap_test.go @@ -71,7 +71,7 @@ func TestHashMapIncrementValue(t *testing.T) { } func TestGetValueFromHashMap(t *testing.T) { - store := store.NewStore(nil, nil) + store := store.NewStore(nil, nil, nil) key := "key1" field := "field1" value := "value1" diff --git a/internal/eval/main_test.go b/internal/eval/main_test.go index 8c9fe4a80..bd6dac9c1 100644 --- a/internal/eval/main_test.go +++ b/internal/eval/main_test.go @@ -8,7 +8,7 @@ import ( ) func TestMain(m *testing.M) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) store.ResetStore() exitCode := m.Run() diff --git a/internal/server/utils/time.go b/internal/server/utils/time.go index 07d53fe18..88fd2949f 100644 --- a/internal/server/utils/time.go +++ b/internal/server/utils/time.go @@ -28,6 +28,10 @@ func (mc *MockClock) SetTime(t time.Time) { mc.CurrTime = t } +func (mc *MockClock) GetTime() time.Time { + return mc.CurrTime +} + func GetCurrentTime() time.Time { return CurrentTime.Now() } diff --git a/internal/shard/shard_manager.go b/internal/shard/shard_manager.go index c94a13b4d..86bf41fbf 100644 --- a/internal/shard/shard_manager.go +++ b/internal/shard/shard_manager.go @@ -7,6 +7,8 @@ import ( "sync" "syscall" + "github.com/dicedb/dice/config" + "github.com/cespare/xxhash/v2" "github.com/dicedb/dice/internal/ops" dstore "github.com/dicedb/dice/internal/store" @@ -30,9 +32,11 @@ func NewShardManager(shardCount uint8, queryWatchChan chan dstore.QueryWatchEven shardReqMap := make(map[ShardID]chan *ops.StoreOp) shardErrorChan := make(chan *ShardError) + maxKeysPerShard := config.KeysLimit / int(shardCount) for i := uint8(0); i < shardCount; i++ { + evictionStrategy := dstore.NewBatchEvictionLRU(maxKeysPerShard, config.DiceConfig.Memory.EvictionRatio) // Shards are numbered from 0 to shardCount-1 - shard := NewShardThread(i, globalErrorChan, shardErrorChan, queryWatchChan, cmdWatchChan) + shard := NewShardThread(i, globalErrorChan, shardErrorChan, queryWatchChan, cmdWatchChan, evictionStrategy) shards[i] = shard shardReqMap[i] = shard.ReqChan } diff --git a/internal/shard/shard_thread.go b/internal/shard/shard_thread.go index a556482e0..635c9dafe 100644 --- a/internal/shard/shard_thread.go +++ b/internal/shard/shard_thread.go @@ -41,10 +41,11 
@@ type ShardThread struct { } // NewShardThread creates a new ShardThread instance with the given shard id and error channel. -func NewShardThread(id ShardID, gec chan error, sec chan *ShardError, queryWatchChan chan dstore.QueryWatchEvent, cmdWatchChan chan dstore.CmdWatchEvent) *ShardThread { +func NewShardThread(id ShardID, gec chan error, sec chan *ShardError, queryWatchChan chan dstore.QueryWatchEvent, + cmdWatchChan chan dstore.CmdWatchEvent, evictionStrategy dstore.EvictionStrategy) *ShardThread { return &ShardThread{ id: id, - store: dstore.NewStore(queryWatchChan, cmdWatchChan), + store: dstore.NewStore(queryWatchChan, cmdWatchChan, evictionStrategy), ReqChan: make(chan *ops.StoreOp, 1000), workerMap: make(map[string]WorkerChannels), globalErrorChan: gec, diff --git a/internal/sql/executerbechmark_test.go b/internal/sql/executerbechmark_test.go index 91d1cd5e4..082087212 100644 --- a/internal/sql/executerbechmark_test.go +++ b/internal/sql/executerbechmark_test.go @@ -35,7 +35,7 @@ func generateBenchmarkData(count int, store *dstore.Store) { } func BenchmarkExecuteQueryOrderBykey(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, dstore.NewBatchEvictionLRU(config.DefaultKeysLimit, config.DefaultEvictionRatio)) for _, v := range benchmarkDataSizes { generateBenchmarkData(v, store) @@ -60,7 +60,7 @@ func BenchmarkExecuteQueryOrderBykey(b *testing.B) { } func BenchmarkExecuteQueryBasicOrderByValue(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, dstore.NewBatchEvictionLRU(config.DefaultKeysLimit, config.DefaultEvictionRatio)) for _, v := range benchmarkDataSizes { generateBenchmarkData(v, store) @@ -83,7 +83,7 @@ func BenchmarkExecuteQueryBasicOrderByValue(b *testing.B) { } func BenchmarkExecuteQueryLimit(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, dstore.NewBatchEvictionLRU(config.DefaultKeysLimit, config.DefaultEvictionRatio)) for _, v := range benchmarkDataSizes { generateBenchmarkData(v, store) @@ -106,7 +106,7 @@ func BenchmarkExecuteQueryLimit(b *testing.B) { } func BenchmarkExecuteQueryNoMatch(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, dstore.NewBatchEvictionLRU(config.DefaultKeysLimit, config.DefaultEvictionRatio)) for _, v := range benchmarkDataSizes { generateBenchmarkData(v, store) @@ -129,7 +129,7 @@ func BenchmarkExecuteQueryNoMatch(b *testing.B) { } func BenchmarkExecuteQueryWithBasicWhere(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, dstore.NewBatchEvictionLRU(config.DefaultKeysLimit, config.DefaultEvictionRatio)) for _, v := range benchmarkDataSizes { generateBenchmarkData(v, store) @@ -152,7 +152,7 @@ func BenchmarkExecuteQueryWithBasicWhere(b *testing.B) { } func BenchmarkExecuteQueryWithComplexWhere(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, dstore.NewBatchEvictionLRU(config.DefaultKeysLimit, config.DefaultEvictionRatio)) for _, v := range benchmarkDataSizes { generateBenchmarkData(v, store) @@ -175,7 +175,7 @@ func BenchmarkExecuteQueryWithComplexWhere(b *testing.B) { } func BenchmarkExecuteQueryWithCompareWhereKeyandValue(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, dstore.NewBatchEvictionLRU(config.DefaultKeysLimit, config.DefaultEvictionRatio)) for _, v := range benchmarkDataSizes { generateBenchmarkData(v, store) @@ -198,7 +198,7 @@ func 
BenchmarkExecuteQueryWithCompareWhereKeyandValue(b *testing.B) { } func BenchmarkExecuteQueryWithBasicWhereNoMatch(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, dstore.NewBatchEvictionLRU(config.DefaultKeysLimit, config.DefaultEvictionRatio)) for _, v := range benchmarkDataSizes { generateBenchmarkData(v, store) @@ -221,7 +221,7 @@ func BenchmarkExecuteQueryWithBasicWhereNoMatch(b *testing.B) { } func BenchmarkExecuteQueryWithCaseSesnsitivity(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, dstore.NewBatchEvictionLRU(config.DefaultKeysLimit, config.DefaultEvictionRatio)) for _, v := range benchmarkDataSizes { generateBenchmarkData(v, store) @@ -243,7 +243,7 @@ func BenchmarkExecuteQueryWithCaseSesnsitivity(b *testing.B) { } func BenchmarkExecuteQueryWithClauseOnKey(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, dstore.NewBatchEvictionLRU(config.DefaultKeysLimit, config.DefaultEvictionRatio)) for _, v := range benchmarkDataSizes { generateBenchmarkData(v, store) @@ -266,7 +266,7 @@ func BenchmarkExecuteQueryWithClauseOnKey(b *testing.B) { } func BenchmarkExecuteQueryWithAllMatchingKeyRegex(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, dstore.NewBatchEvictionLRU(config.DefaultKeysLimit, config.DefaultEvictionRatio)) for _, v := range benchmarkDataSizes { generateBenchmarkData(v, store) @@ -308,7 +308,7 @@ func generateBenchmarkJSONData(b *testing.B, count int, json string, store *dsto } func BenchmarkExecuteQueryWithJSON(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, dstore.NewBatchEvictionLRU(config.DefaultKeysLimit, config.DefaultEvictionRatio)) for _, v := range benchmarkDataSizesJSON { for jsonSize, json := range jsonList { generateBenchmarkJSONData(b, v, json, store) @@ -333,7 +333,7 @@ func BenchmarkExecuteQueryWithJSON(b *testing.B) { } func BenchmarkExecuteQueryWithNestedJSON(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, dstore.NewBatchEvictionLRU(config.DefaultKeysLimit, config.DefaultEvictionRatio)) for _, v := range benchmarkDataSizesJSON { for jsonSize, json := range jsonList { generateBenchmarkJSONData(b, v, json, store) @@ -358,7 +358,7 @@ func BenchmarkExecuteQueryWithNestedJSON(b *testing.B) { } func BenchmarkExecuteQueryWithJsonInLeftAndRightExpressions(b *testing.B) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, dstore.NewBatchEvictionLRU(config.DefaultKeysLimit, config.DefaultEvictionRatio)) for _, v := range benchmarkDataSizesJSON { for jsonSize, json := range jsonList { generateBenchmarkJSONData(b, v, json, store) @@ -384,7 +384,7 @@ func BenchmarkExecuteQueryWithJsonInLeftAndRightExpressions(b *testing.B) { func BenchmarkExecuteQueryWithJsonNoMatch(b *testing.B) { for _, v := range benchmarkDataSizesJSON { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, dstore.NewBatchEvictionLRU(config.DefaultKeysLimit, config.DefaultEvictionRatio)) for jsonSize, json := range jsonList { generateBenchmarkJSONData(b, v, json, store) diff --git a/internal/sql/executor_test.go b/internal/sql/executor_test.go index b8a33f7de..9e1978527 100644 --- a/internal/sql/executor_test.go +++ b/internal/sql/executor_test.go @@ -4,11 +4,10 @@ import ( "sort" "testing" - "github.com/dicedb/dice/internal/object" - "github.com/dicedb/dice/internal/sql" - "github.com/bytedance/sonic" + 
"github.com/dicedb/dice/internal/object" "github.com/dicedb/dice/internal/server/utils" + "github.com/dicedb/dice/internal/sql" dstore "github.com/dicedb/dice/internal/store" "github.com/stretchr/testify/assert" "github.com/xwb1989/sqlparser" @@ -42,7 +41,7 @@ func setup(store *dstore.Store, dataset []keyValue) { } func TestExecuteQueryOrderBykey(t *testing.T) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) setup(store, simpleKVDataset) queryString := "SELECT $key, $value WHERE $key like 'k*' ORDER BY $key ASC" @@ -69,7 +68,7 @@ func TestExecuteQueryOrderBykey(t *testing.T) { } func TestExecuteQueryBasicOrderByValue(t *testing.T) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) setup(store, simpleKVDataset) queryStr := "SELECT $key, $value WHERE $key like 'k*' ORDER BY $value ASC" @@ -96,7 +95,7 @@ func TestExecuteQueryBasicOrderByValue(t *testing.T) { } func TestExecuteQueryLimit(t *testing.T) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) setup(store, simpleKVDataset) queryStr := "SELECT $value WHERE $key like 'k*' ORDER BY $key ASC LIMIT 3" @@ -123,7 +122,7 @@ func TestExecuteQueryLimit(t *testing.T) { } func TestExecuteQueryNoMatch(t *testing.T) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) setup(store, simpleKVDataset) queryStr := "SELECT $key, $value WHERE $key like 'x*'" @@ -137,7 +136,7 @@ func TestExecuteQueryNoMatch(t *testing.T) { } func TestExecuteQueryWithWhere(t *testing.T) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) setup(store, simpleKVDataset) t.Run("BasicWhereClause", func(t *testing.T) { queryStr := "SELECT $key, $value WHERE $value = 'v3' AND $key like 'k*'" @@ -190,7 +189,7 @@ func TestExecuteQueryWithWhere(t *testing.T) { } func TestExecuteQueryWithIncompatibleTypes(t *testing.T) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) setup(store, simpleKVDataset) t.Run("ComparingStrWithInt", func(t *testing.T) { @@ -205,7 +204,7 @@ func TestExecuteQueryWithIncompatibleTypes(t *testing.T) { } func TestExecuteQueryWithEdgeCases(t *testing.T) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) setup(store, simpleKVDataset) t.Run("CaseSensitivity", func(t *testing.T) { @@ -285,7 +284,7 @@ func setupJSON(t *testing.T, store *dstore.Store, dataset []keyValue) { } func TestExecuteQueryWithJsonExpressionInWhere(t *testing.T) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) setupJSON(t, store, jsonWhereClauseDataset) t.Run("BasicWhereClauseWithJSON", func(t *testing.T) { @@ -394,7 +393,7 @@ var jsonOrderDataset = []keyValue{ } func TestExecuteQueryWithJsonOrderBy(t *testing.T) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) setupJSON(t, store, jsonOrderDataset) t.Run("OrderBySimpleJSONField", func(t *testing.T) { @@ -557,7 +556,7 @@ var stringComparisonDataset = []keyValue{ } func TestExecuteQueryWithLikeStringComparisons(t *testing.T) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) setup(store, stringComparisonDataset) testCases := []struct { @@ -642,7 +641,7 @@ func TestExecuteQueryWithLikeStringComparisons(t *testing.T) { } func TestExecuteQueryWithStringNotLikeComparisons(t *testing.T) { - store := dstore.NewStore(nil, nil) + store := dstore.NewStore(nil, nil, nil) setup(store, stringComparisonDataset) testCases := []struct { diff --git 
a/internal/store/batchevictionlru.go b/internal/store/batchevictionlru.go new file mode 100644 index 000000000..5380f9d20 --- /dev/null +++ b/internal/store/batchevictionlru.go @@ -0,0 +1,113 @@ +package store + +import ( + "container/heap" + "math" + + "github.com/dicedb/dice/internal/object" +) + +// evictionItemHeap is a max-heap of evictionItems based on lastAccessed. +type evictionItemHeap []evictionItem + +func (h *evictionItemHeap) Len() int { return len(*h) } + +func (h *evictionItemHeap) Less(i, j int) bool { + // For a max-heap, we want higher lastAccessed at the top. + return (*h)[i].lastAccessed > (*h)[j].lastAccessed +} + +func (h *evictionItemHeap) Swap(i, j int) { (*h)[i], (*h)[j] = (*h)[j], (*h)[i] } + +func (h *evictionItemHeap) Push(x interface{}) { + *h = append(*h, x.(evictionItem)) +} + +func (h *evictionItemHeap) Pop() interface{} { + old := *h + n := len(old) + x := old[n-1] + *h = old[:n-1] + return x +} + +// Encapsulate heap operations to avoid interface{} in main code. +func (h *evictionItemHeap) push(item evictionItem) { + heap.Push(h, item) +} + +func (h *evictionItemHeap) pop() evictionItem { + return heap.Pop(h).(evictionItem) +} + +// BatchEvictionLRU implements batch eviction of least recently used keys +type BatchEvictionLRU struct { + BaseEvictionStrategy + maxKeys int + evictionRatio float64 +} + +func NewBatchEvictionLRU(maxKeys int, evictionRatio float64) *BatchEvictionLRU { + return &BatchEvictionLRU{ + maxKeys: maxKeys, + evictionRatio: evictionRatio, + } +} + +func (e *BatchEvictionLRU) ShouldEvict(store *Store) int { + currentKeyCount := store.GetKeyCount() + + // Check if eviction is necessary only till the number of keys remains less than maxKeys + if currentKeyCount < e.maxKeys { + return 0 // No eviction needed + } + + // Calculate target key count after eviction + targetKeyCount := int(math.Ceil(float64(e.maxKeys) * (1 - e.evictionRatio))) + + // Calculate the number of keys to evict to reach the target key count + toEvict := currentKeyCount - targetKeyCount + if toEvict < 1 { + toEvict = 1 // Ensure at least one key is evicted if eviction is triggered + } + + return toEvict +} + +// EvictVictims deletes keys with the lowest LastAccessedAt values from the store. 
+func (e *BatchEvictionLRU) EvictVictims(store *Store, toEvict int) { + if toEvict <= 0 { + return + } + + h := make(evictionItemHeap, 0, toEvict) + heap.Init(&h) + + store.GetStore().All(func(k string, obj *object.Obj) bool { + item := evictionItem{ + key: k, + lastAccessed: obj.LastAccessedAt, + } + if h.Len() < toEvict { + h.push(item) + return true + } + + if item.lastAccessed < h[0].lastAccessed { + h.pop() + h.push(item) + } + return true + }) + + for h.Len() > 0 { + item := h.pop() + store.Del(item.key, WithDelCmd(Evict)) + } + + e.stats.recordEviction(int64(toEvict)) +} + +func (e *BatchEvictionLRU) OnAccess(key string, obj *object.Obj, accessType AccessType) { + // Nothing to do for LRU batch eviction +} diff --git a/internal/store/batchevictionlru_test.go b/internal/store/batchevictionlru_test.go new file mode 100644 index 000000000..3d2ed333e --- /dev/null +++ b/internal/store/batchevictionlru_test.go @@ -0,0 +1,176 @@ +package store + +import ( + "github.com/dicedb/dice/internal/object" + "github.com/dicedb/dice/internal/server/utils" + "github.com/stretchr/testify/assert" + "strconv" + "testing" + "time" +) + +func TestEvictVictims_BelowMaxKeys(t *testing.T) { + eviction := NewBatchEvictionLRU(5, 0.2) + s := NewStore(nil, nil, eviction) + + // Add 3 keys (below maxKeys of 5) + for i := 1; i <= 3; i++ { + key := "key" + strconv.Itoa(i) + obj := &object.Obj{ + LastAccessedAt: getCurrentClock() + uint32(i), + } + s.Put(key, obj) + } + + initialKeyCount := s.GetKeyCount() + toEvict := eviction.ShouldEvict(s) + assert.Equal(t, 0, toEvict, "Should not evict any keys when below maxKeys") + + eviction.EvictVictims(s, toEvict) + assert.Equal(t, initialKeyCount, s.GetKeyCount(), "No keys should be evicted when below maxKeys") +} + +func TestEvictVictims_ExceedsMaxKeys(t *testing.T) { + maxKeys := 5 + evictionRatio := 0.4 + eviction := NewBatchEvictionLRU(maxKeys, evictionRatio) + s := NewStore(nil, nil, eviction) + + // Add 10 keys, exceeding maxKeys of 5 + for i := 1; i <= 10; i++ { + key := "key" + strconv.Itoa(i) + obj := &object.Obj{ + LastAccessedAt: getCurrentClock() + uint32(i), + } + + s.Put(key, obj) + } + + // Ensure number of keys are equal to or below maxKeys after eviction + keyCount := s.GetKeyCount() + assert.True(t, keyCount <= maxKeys, "Should have max or lesser number of keys remaining after eviction") +} + +func TestEvictVictims_EvictsLRU(t *testing.T) { + mockTime := &utils.MockClock{CurrTime: time.Now()} + utils.CurrentTime = mockTime + + eviction := NewBatchEvictionLRU(10, 0.4) + s := NewStore(nil, nil, eviction) + + // Add keys with varying LastAccessedAt + keyIDs := []int{0, 7, 1, 9, 4, 6, 5, 2, 8, 3, 10} + for _, id := range keyIDs { + // Ensure LastAccessedAt is unique + key := "key" + strconv.Itoa(id) + obj := &object.Obj{} + mockTime.SetTime(mockTime.GetTime().Add(5 * time.Second)) + s.Put(key, obj) + } + + // Expected to evict 4 keys with lowest LastAccessedAt, i.e. 
the first 4 keys added to the store + evictedKeys := []string{"key0", "key7", "key1", "key9"} // The first four keys inserted, hence the oldest LastAccessedAt values + + remainingKeyCount := s.GetKeyCount() + assert.Equal(t, 7, remainingKeyCount, "Should have 7 keys remaining: 6 after eviction + 1 added post-eviction") + + // Verify that evicted keys are no longer in the store + for _, key := range evictedKeys { + obj := s.GetNoTouch(key) + assert.Nil(t, obj, "Key %s should have been evicted", key) + } +} + +func TestEvictVictims_IdenticalLastAccessedAt(t *testing.T) { + currentTime := time.Now() + mockTime := &utils.MockClock{CurrTime: currentTime} + utils.CurrentTime = mockTime + eviction := NewBatchEvictionLRU(10, 0.5) + s := NewStore(nil, nil, eviction) + + // Add 11 keys with identical LastAccessedAt; the 11th Put triggers eviction + for i := 0; i <= 10; i++ { + key := "key" + strconv.Itoa(i) + obj := &object.Obj{} + mockTime.SetTime(currentTime) // Not strictly needed, set explicitly for clarity + s.Put(key, obj) + } + + expectedRemainingKeys := 6 // 5 after eviction + 1 key added after eviction + assert.Equal(t, expectedRemainingKeys, s.GetKeyCount(), "Should have evicted 5 keys") +} + +func TestEvictVictims_EvictsAtLeastOne(t *testing.T) { + eviction := NewBatchEvictionLRU(10, 0.000) // 0% eviction rate + s := NewStore(nil, nil, eviction) + + // Add 10 keys (equals maxKeys) + for i := 0; i < 10; i++ { + key := "key" + strconv.Itoa(i) + obj := &object.Obj{} + s.Put(key, obj) + } + + toEvict := eviction.ShouldEvict(s) + assert.Equal(t, 1, toEvict, "Should evict at least one key") +} + +func TestEvictVictims_EmptyStore(t *testing.T) { // Handles Empty Store Gracefully + eviction := NewBatchEvictionLRU(5, 0.2) + s := NewStore(nil, nil, eviction) + + toEvict := eviction.ShouldEvict(s) + assert.Equal(t, 0, toEvict, "Should not evict any keys when store is empty") + + eviction.EvictVictims(s, toEvict) + assert.Equal(t, 0, s.GetKeyCount(), "Store should remain empty after eviction") +} + +func TestEvictVictims_LastAccessedAtUpdated(t *testing.T) { + currentTime := time.Now() + mockTime := &utils.MockClock{CurrTime: currentTime} + utils.CurrentTime = mockTime + eviction := NewBatchEvictionLRU(10, 0.4) + s := NewStore(nil, nil, eviction) + + // Add keys with initial LastAccessedAt + for i := 1; i <= 10; i++ { + key := "key" + strconv.Itoa(i) + obj := &object.Obj{} + s.Put(key, obj) + } + + // Simulate access to some keys, updating LastAccessedAt + accessedKeys := []string{"key2", "key3", "key4", "key5", "key6", "key7", "key8", "key10"} + for _, key := range accessedKeys { + mockTime.SetTime(mockTime.GetTime().Add(5 * time.Second)) + s.Get(key) // This should update LastAccessedAt + } + + // The keys that were not accessed should be more likely to be evicted + // Verify that the accessed keys are still in the store + for _, key := range accessedKeys { + obj := s.GetNoTouch(key) + assert.NotNil(t, obj, "Key %s should remain after eviction", key) + } + + s.Put("key11", &object.Obj{}) // Trigger eviction + + // Verify that unaccessed keys were evicted + unaccessedKeys := []string{"key1", "key9"} + for _, key := range unaccessedKeys { + obj := s.GetNoTouch(key) + assert.Nil(t, obj, "Key %s should have been evicted", key) + } + + // Verify that some of the accessed keys were evicted + numRemovedKeys := 0 + for _, key := range accessedKeys { + obj := s.GetNoTouch(key) + if obj == nil { + numRemovedKeys++ + } + } + + assert.True(t, numRemovedKeys == 2, "2 accessed keys should have been evicted") +} diff --git 
a/internal/store/constants.go b/internal/store/constants.go index fad990f1c..06ebf5b56 100644 --- a/internal/store/constants.go +++ b/internal/store/constants.go @@ -14,4 +14,5 @@ const ( PFCOUNT string = "PFCOUNT" PFMERGE string = "PFMERGE" KEYSPERSHARD string = "KEYSPERSHARD" + Evict string = "EVICT" ) diff --git a/internal/store/eviction.go b/internal/store/eviction.go index af53f2735..f68205182 100644 --- a/internal/store/eviction.go +++ b/internal/store/eviction.go @@ -1,83 +1,81 @@ package store import ( - "math/rand" + "time" - "github.com/dicedb/dice/config" "github.com/dicedb/dice/internal/object" "github.com/dicedb/dice/internal/server/utils" ) -// Evicts the first key it found while iterating the map -// TODO: Make it efficient by doing thorough sampling -func evictFirst(store *Store) { - store.store.All(func(k string, obj *object.Obj) bool { - store.delByPtr(k, WithDelCmd(Del)) - // stop after iterating over the first element - return false - }) +// EvictionStats tracks common statistics for all eviction strategies +type EvictionStats struct { + totalEvictions uint64 + totalKeysEvicted uint64 + lastEvictionCount int64 + lastEvictionTimeMs int64 } -// Randomly removes keys to make space for the new data added. -// The number of keys removed will be sufficient to free up at least 10% space -func evictAllkeysRandom(store *Store) { - evictCount := int64(config.DiceConfig.Memory.EvictionRatio * float64(config.DiceConfig.Memory.KeysLimit)) - // Iteration of Golang dictionary can be considered as a random - // because it depends on the hash of the inserted key - store.store.All(func(k string, obj *object.Obj) bool { - store.delByPtr(k, WithDelCmd(Del)) - evictCount-- - // continue if evictCount > 0 - return evictCount > 0 - }) +func (s *EvictionStats) recordEviction(count int64) { + s.totalEvictions++ + s.totalKeysEvicted += uint64(count) + s.lastEvictionCount = count + s.lastEvictionTimeMs = time.Now().UnixMilli() } -/* - * The approximated LRU algorithm - */ -func getCurrentClock() uint32 { - return uint32(utils.GetCurrentTime().Unix()) & 0x00FFFFFF +// EvictionResult represents the outcome of an eviction operation +type EvictionResult struct { + Victims map[string]*object.Obj // Keys and objects that were selected for eviction + Count int64 // Number of items selected for eviction } -func GetLFULogCounter(lastAccessedAt uint32) uint8 { - return uint8((lastAccessedAt & 0xFF000000) >> 24) +// AccessType represents different types of access to a key +type AccessType int + +const ( + AccessGet AccessType = iota + AccessSet + AccessDel +) + +// evictionItem stores essential data needed for eviction decision +type evictionItem struct { + key string + lastAccessed uint32 } -func UpdateLFULastAccessedAt(lastAccessedAt uint32) uint32 { - currentUnixTime := getCurrentClock() - counter := GetLFULogCounter(lastAccessedAt) +// EvictionStrategy defines the interface for different eviction strategies +type EvictionStrategy interface { + // ShouldEvict checks if eviction should be triggered based on the current store state + // Returns the number of items that should be evicted, or 0 if no eviction is needed + ShouldEvict(store *Store) int + + // EvictVictims evicts items from the store based on the eviction strategy + EvictVictims(store *Store, toEvict int) - counter = incrLogCounter(counter) - return (uint32(counter) << 24) | currentUnixTime + // AfterEviction is called after victims have been evicted from the store + // This allows strategies to update their internal state if needed + // 
AfterEviction(result EvictionResult) + + // OnAccess is called when an item is accessed (get/set) + // This allows strategies to update access patterns/statistics + OnAccess(key string, obj *object.Obj, accessType AccessType) } -func GetLastAccessedAt(lastAccessedAt uint32) uint32 { - return lastAccessedAt & 0x00FFFFFF +// BaseEvictionStrategy provides common functionality for all eviction strategies +type BaseEvictionStrategy struct { + stats EvictionStats } -func UpdateLastAccessedAt(lastAccessedAt uint32) uint32 { - if config.DiceConfig.Memory.EvictionPolicy == config.EvictAllKeysLFU { - return UpdateLFULastAccessedAt(lastAccessedAt) - } - return getCurrentClock() +func (b *BaseEvictionStrategy) AfterEviction(result EvictionResult) { + b.stats.recordEviction(result.Count) } -/* - - Similar to redis implementation of increasing access counter for a key - - The larger the counter value, the lesser is probability of its increment in counter value - - This counter is 8-bit number that will represent an approximate access counter of a key and will - piggyback first 8 bits of `LastAccessedAt` field of Dice Object -*/ -func incrLogCounter(counter uint8) uint8 { - if counter == 255 { - return 255 - } - randomFactor := rand.Float32() //nolint:gosec - approxFactor := 1.0 / float32(counter*uint8(config.DiceConfig.Memory.LFULogFactor)+1) - if approxFactor > randomFactor { - counter++ - } - return counter +func (b *BaseEvictionStrategy) GetStats() EvictionStats { + return b.stats +} + +func getCurrentClock() uint32 { + return uint32(utils.GetCurrentTime().Unix()) & 0x00FFFFFF } func GetIdleTime(lastAccessedAt uint32) uint32 { @@ -88,47 +86,3 @@ func GetIdleTime(lastAccessedAt uint32) uint32 { } return (0x00FFFFFF - lastAccessedAt) + c } - -func PopulateEvictionPool(store *Store) { - sampleSize := 5 - // TODO: if we already have obj, why do we need to - // look up in store.store again? - store.store.All(func(k string, obj *object.Obj) bool { - v, ok := store.store.Get(k) - if ok { - EPool.Push(k, v.LastAccessedAt) - sampleSize-- - } - // continue if sample size > 0 - // stop as soon as it hits 0 - return sampleSize > 0 - }) -} - -// EvictAllkeysLRUOrLFU evicts keys based on LRU or LFU policy. -// TODO: no need to populate every time. should populate only when the number of keys to evict is less than what we have in the pool -func EvictAllkeysLRUOrLFU(store *Store) { - PopulateEvictionPool(store) - evictCount := int16(config.DiceConfig.Memory.EvictionRatio * float64(config.DiceConfig.Memory.KeysLimit)) - - for i := 0; i < int(evictCount) && len(EPool.pool) > 0; i++ { - item := EPool.Pop() - if item == nil { - return - } - store.DelByPtr(item.keyPtr, WithDelCmd(Del)) - } -} - -func (store *Store) evict() { - switch config.DiceConfig.Memory.EvictionPolicy { - case config.EvictSimpleFirst: - evictFirst(store) - case config.EvictAllKeysRandom: - evictAllkeysRandom(store) - case config.EvictAllKeysLRU: - EvictAllkeysLRUOrLFU(store) - case config.EvictAllKeysLFU: - EvictAllkeysLRUOrLFU(store) - } -} diff --git a/internal/store/evictionpool.go b/internal/store/evictionpool.go deleted file mode 100644 index 029596099..000000000 --- a/internal/store/evictionpool.go +++ /dev/null @@ -1,115 +0,0 @@ -package store - -import ( - "sort" - - "github.com/dicedb/dice/config" -) - -type PoolItem struct { - keyPtr string - lastAccessedAt uint32 -} - -// EvictionPool is a priority queue of PoolItem. 
-// TODO: When last accessed at of object changes update the poolItem corresponding to that -type EvictionPool struct { - pool []*PoolItem - keyset map[string]*PoolItem -} - -type ByIdleTime []*PoolItem -type ByCounterAndIdleTime []*PoolItem - -func (a ByIdleTime) Len() int { - return len(a) -} - -func (a ByIdleTime) Swap(i, j int) { - a[i], a[j] = a[j], a[i] -} - -func (a ByIdleTime) Less(i, j int) bool { - return GetIdleTime(a[i].lastAccessedAt) > GetIdleTime(a[j].lastAccessedAt) -} - -func (a ByCounterAndIdleTime) Len() int { - return len(a) -} - -func (a ByCounterAndIdleTime) Swap(i, j int) { - a[i], a[j] = a[j], a[i] -} - -func (a ByCounterAndIdleTime) Less(i, j int) bool { - counterI := GetLFULogCounter(a[i].lastAccessedAt) - counterJ := GetLFULogCounter(a[j].lastAccessedAt) - - if counterI == counterJ { - // if access counters are same, sort by idle time - return GetIdleTime(a[i].lastAccessedAt) > GetIdleTime(a[j].lastAccessedAt) - } - - return counterI < counterJ -} - -// Push adds a new item to the pool -// TODO: Make the implementation efficient to not need repeated sorting -func (pq *EvictionPool) Push(key string, lastAccessedAt uint32) { - _, ok := pq.keyset[key] - if ok { - return - } - item := &PoolItem{keyPtr: key, lastAccessedAt: lastAccessedAt} - if len(pq.pool) < ePoolSizeMax { - pq.keyset[key] = item - pq.pool = append(pq.pool, item) - - // Performance bottleneck - if config.DiceConfig.Memory.EvictionPolicy == config.EvictAllKeysLFU { - sort.Sort(ByCounterAndIdleTime(pq.pool)) - } else { - sort.Sort(ByIdleTime(pq.pool)) - } - } else { - shouldShift := func() bool { - if config.DiceConfig.Memory.EvictionPolicy == config.EvictAllKeysLFU { - logCounter, poolLogCounter := GetLFULogCounter(lastAccessedAt), GetLFULogCounter(pq.pool[0].lastAccessedAt) - if logCounter < poolLogCounter { - return true - } - if logCounter == poolLogCounter { - return GetLastAccessedAt(lastAccessedAt) > GetLastAccessedAt(pq.pool[0].lastAccessedAt) - } - return false - } - return lastAccessedAt > pq.pool[0].lastAccessedAt - }() - - if shouldShift { - pq.pool = pq.pool[1:] - pq.keyset[key] = item - pq.pool = append(pq.pool, item) - } - } -} - -func (pq *EvictionPool) Pop() *PoolItem { - if len(pq.pool) == 0 { - return nil - } - item := pq.pool[0] - pq.pool = pq.pool[1:] - delete(pq.keyset, item.keyPtr) - return item -} - -func NewEvictionPool(size int) *EvictionPool { - return &EvictionPool{ - pool: make([]*PoolItem, size), - keyset: make(map[string]*PoolItem), - } -} - -var ePoolSizeMax = 16 -var EPool = NewEvictionPool(0) diff --git a/internal/store/expire_test.go b/internal/store/expire_test.go index bace26856..91b371cf9 100644 --- a/internal/store/expire_test.go +++ b/internal/store/expire_test.go @@ -7,7 +7,7 @@ import ( ) func TestDelExpiry(t *testing.T) { - store := NewStore(nil, nil) + store := NewStore(nil, nil, nil) // Initialize the test environment store.store = NewStoreMap() store.expires = NewExpireMap() diff --git a/internal/store/lfu_eviction_test.go b/internal/store/lfu_eviction_test.go deleted file mode 100644 index da5cf5449..000000000 --- a/internal/store/lfu_eviction_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package store - -import ( - "testing" - "time" - - "github.com/dicedb/dice/config" - "github.com/dicedb/dice/internal/object" -) - -func TestLFUEviction(t *testing.T) { - originalEvictionPolicy := config.DiceConfig.Memory.EvictionPolicy - - store := NewStore(nil, nil) - config.DiceConfig.Memory.EvictionPolicy = config.EvictAllKeysLFU - - // Define test cases - tests := []struct 
{ - name string - obj []*object.Obj - keys []string - setup func(string, *object.Obj) - perform func() - expectedKey string // false if we expect the key to be deleted - sleep bool - }{ - { - name: "Test LFU - eviction pool should have least recent key if all access keys are same", - keys: []string{"k1", "k2", "k3", "k4"}, - obj: []*object.Obj{{}, {}, {}, {}}, - setup: func(k string, obj *object.Obj) { - store.Put(k, obj) - }, - perform: func() { - PopulateEvictionPool(store) - }, - expectedKey: "k1", - sleep: true, - }, - { - name: "Test LFU - eviction pool should have least frequently used key", - keys: []string{"k1", "k2", "k3", "k4"}, - obj: []*object.Obj{{}, {}, {}, {}}, - setup: func(k string, obj *object.Obj) { - store.Put(k, obj) - }, - perform: func() { - // ensuring approximate counter is incremented at least one time - for i := 0; i < 100; i++ { - store.Get("k1") - store.Get("k2") - store.Get("k3") - } - - EPool = NewEvictionPool(0) - PopulateEvictionPool(store) - }, - expectedKey: "k4", - sleep: false, - }, - } - - // Run test cases - for _, tc := range tests { - store.ResetStore() - EPool = NewEvictionPool(0) - - t.Run(tc.name, func(t *testing.T) { - - for i := 0; i < len(tc.keys); i++ { - tc.setup(tc.keys[i], tc.obj[i]) - if tc.sleep { - time.Sleep(1000 * time.Millisecond) - } - } - - tc.perform() - - if EPool.pool[0].keyPtr != tc.expectedKey { - t.Errorf("Expected: %s but got: %s\n", tc.expectedKey, EPool.pool[0].keyPtr) - } - }) - } - - config.DiceConfig.Memory.EvictionPolicy = originalEvictionPolicy -} diff --git a/internal/store/store.go b/internal/store/store.go index a43c339ca..71f15876c 100644 --- a/internal/store/store.go +++ b/internal/store/store.go @@ -3,16 +3,14 @@ package store import ( "path" - "github.com/ohler55/ojg/jp" + "github.com/dicedb/dice/config" "github.com/dicedb/dice/internal/common" "github.com/dicedb/dice/internal/object" + "github.com/dicedb/dice/internal/server/utils" "github.com/dicedb/dice/internal/sql" + "github.com/ohler55/ojg/jp" "github.com/xwb1989/sqlparser" - - "github.com/dicedb/dice/internal/server/utils" - - "github.com/dicedb/dice/config" ) func NewStoreRegMap() common.ITable[string, *object.Obj] { @@ -35,6 +33,13 @@ func NewExpireMap() common.ITable[*object.Obj, uint64] { return NewExpireRegMap() } +func NewDefaultEviction() EvictionStrategy { + return &BatchEvictionLRU{ + maxKeys: config.DefaultKeysLimit, + evictionRatio: config.DefaultEvictionRatio, + } +} + // QueryWatchEvent represents a change in a watched key. type QueryWatchEvent struct { Key string @@ -48,20 +53,27 @@ type CmdWatchEvent struct { } type Store struct { - store common.ITable[string, *object.Obj] - expires common.ITable[*object.Obj, uint64] // Does not need to be thread-safe as it is only accessed by a single thread. - numKeys int - queryWatchChan chan QueryWatchEvent - cmdWatchChan chan CmdWatchEvent -} - -func NewStore(queryWatchChan chan QueryWatchEvent, cmdWatchChan chan CmdWatchEvent) *Store { - return &Store{ - store: NewStoreRegMap(), - expires: NewExpireRegMap(), - queryWatchChan: queryWatchChan, - cmdWatchChan: cmdWatchChan, + store common.ITable[string, *object.Obj] + expires common.ITable[*object.Obj, uint64] // Does not need to be thread-safe as it is only accessed by a single thread. 
+ numKeys int + queryWatchChan chan QueryWatchEvent + cmdWatchChan chan CmdWatchEvent + evictionStrategy EvictionStrategy +} + +func NewStore(queryWatchChan chan QueryWatchEvent, cmdWatchChan chan CmdWatchEvent, evictionStrategy EvictionStrategy) *Store { + store := &Store{ + store: NewStoreRegMap(), + expires: NewExpireRegMap(), + queryWatchChan: queryWatchChan, + cmdWatchChan: cmdWatchChan, + evictionStrategy: evictionStrategy, } + if evictionStrategy == nil { + store.evictionStrategy = NewDefaultEviction() + } + + return store } func ResetStore(store *Store) *Store { @@ -119,9 +131,6 @@ func (store *Store) putHelper(k string, obj *object.Obj, opts ...PutOption) { optApplier(options) } - if store.store.Len() >= config.DiceConfig.Memory.KeysLimit { - store.evict() - } obj.LastAccessedAt = getCurrentClock() currentObject, ok := store.store.Get(k) if ok { @@ -134,9 +143,17 @@ func (store *Store) putHelper(k string, obj *object.Obj, opts ...PutOption) { } store.expires.Delete(currentObject) } else { + // TODO: Inform all the workers and shards about the eviction. + // TODO: Start the eviction only when all the workers and shards have acknowledged the eviction. + evictCount := store.evictionStrategy.ShouldEvict(store) + if evictCount > 0 { + store.evict(evictCount) + } store.numKeys++ } + store.store.Put(k, obj) + store.evictionStrategy.OnAccess(k, obj, AccessSet) if store.queryWatchChan != nil { store.notifyQueryManager(k, Set, *obj) @@ -148,17 +165,18 @@ func (store *Store) putHelper(k string, obj *object.Obj, opts ...PutOption) { // getHelper is a helper function to get the object from the store. It also updates the last accessed time if touch is true. func (store *Store) getHelper(k string, touch bool) *object.Obj { - var v *object.Obj - v, _ = store.store.Get(k) - if v != nil { - if hasExpired(v, store) { - store.deleteKey(k, v) - v = nil + var obj *object.Obj + obj, _ = store.store.Get(k) + if obj != nil { + if hasExpired(obj, store) { + store.deleteKey(k, obj) + obj = nil } else if touch { - v.LastAccessedAt = UpdateLastAccessedAt(v.LastAccessedAt) + obj.LastAccessedAt = getCurrentClock() + store.evictionStrategy.OnAccess(k, obj, AccessGet) } } - return v + return obj } func (store *Store) GetAll(keys []string) []*object.Obj { @@ -170,7 +188,7 @@ func (store *Store) GetAll(keys []string) []*object.Obj { store.deleteKey(k, v) response = append(response, nil) } else { - v.LastAccessedAt = UpdateLastAccessedAt(v.LastAccessedAt) + v.LastAccessedAt = getCurrentClock() response = append(response, v) } } else { @@ -293,6 +311,8 @@ func (store *Store) deleteKey(k string, obj *object.Obj, opts ...DelOption) bool store.expires.Delete(obj) store.numKeys-- + store.evictionStrategy.OnAccess(k, obj, AccessDel) + if store.queryWatchChan != nil { store.notifyQueryManager(k, Del, *obj) } @@ -352,3 +372,8 @@ func (store *Store) CacheKeysForQuery(whereClause sqlparser.Expr, cacheChannel c }) cacheChannel <- &shardCache } + +func (store *Store) evict(evictCount int) bool { + store.evictionStrategy.EvictVictims(store, evictCount) + return true +} diff --git a/main.go b/main.go index 6913fa75d..a52003d7a 100644 --- a/main.go +++ b/main.go @@ -67,9 +67,12 @@ func init() { flag.StringVar(&config.CustomConfigFilePath, "o", config.CustomConfigFilePath, "dir path to create the config file") flag.StringVar(&config.FileLocation, "c", config.FileLocation, "file path of the config file") flag.BoolVar(&config.InitConfigCmd, "init-config", false, "initialize a new config file") + flag.IntVar(&config.KeysLimit, 
"keys-limit", config.KeysLimit, "keys limit for the DiceDB server. "+ "This flag controls the number of keys each shard holds at startup. You can multiply this number with the "+ "total number of shard threads to estimate how much memory will be required at system start up.") + flag.Float64Var(&config.EvictionRatio, "eviction-ratio", 0.1, "ratio of keys to evict when the "+ + "keys limit is reached") flag.Parse()