tkcashgame_v4/pkg/cache/rockscache/client.go

package rockscache
import (
"context"
"encoding/json"
"errors"
"fmt"
"math"
"math/rand"
"time"
"github.com/go-redis/redis/v8"
"github.com/lithammer/shortuuid"
"golang.org/x/sync/singleflight"
)
const (
locked = "LOCKED"
ErrEmptyPlaceholder = "\u0001"
)
var ErrEmpty = errors.New("empty result")
// Options represents the options for rockscache client
type Options struct {
// Delay is the delay-delete time for keys that are tagged as deleted. default is 10s
Delay time.Duration
// EmptyExpire is the expire time for an empty result. default is 60s
EmptyExpire time.Duration
// LockExpire is the expire time for the lock which is allocated when updating cache. default is 3s
// it should be set to the maximum time it takes to compute the underlying data.
LockExpire time.Duration
// LockSleep is the sleep interval between retries when acquiring the lock fails. default is 100ms
LockSleep time.Duration
// WaitReplicas is the number of replicas to wait for. default is 0
// if WaitReplicas is > 0, the redis WAIT command is used to wait until TagAsDeleted is synchronized to the replicas.
WaitReplicas int
// WaitReplicasTimeout is the timeout for the WAIT command. default is 3000ms
// it only applies when WaitReplicas is > 0.
WaitReplicasTimeout time.Duration
// RandomExpireAdjustment is the random adjustment for the expire time. default 0.1
// if the expire time is set to 600s and this value is set to 0.1, the actual expire time will be between 540s and 600s.
// this helps mitigate cache avalanche.
RandomExpireAdjustment float64
// DisableCacheRead disables reading from the cache. default is false
// when redis is down, set this flag to degrade gracefully.
DisableCacheRead bool
// DisableCacheDelete disables deleting from the cache. default is false
// when redis is down, set this flag to degrade gracefully.
DisableCacheDelete bool
// StrongConsistency enables strong consistency. default is false
// if enabled, the Fetch result is always consistent with the db result, at the cost of performance.
StrongConsistency bool
// Prefix is the namespace prefix that Fetch prepends to redis keys.
Prefix string
}
// NewDefaultOptions returns the default options
func NewDefaultOptions() Options {
return Options{
Delay: 10 * time.Second,
EmptyExpire: 60 * time.Second,
LockExpire: 3 * time.Second,
LockSleep: 100 * time.Millisecond,
RandomExpireAdjustment: 0.1,
WaitReplicasTimeout: 3000 * time.Millisecond,
}
}
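// A minimal sketch of tuning the defaults above; the concrete values are
// illustrative assumptions, not recommendations from this package.
func exampleOptions() Options {
	opts := NewDefaultOptions()
	// cache "not found" results for a shorter window than the 60s default
	opts.EmptyExpire = 10 * time.Second
	// spread expirations by up to 10% so keys written together do not expire together
	opts.RandomExpireAdjustment = 0.1
	// namespace every key written by Fetch
	opts.Prefix = "cashgame:"
	return opts
}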
type Serializable interface {
[]byte | string
}
// Client is a rockscache client that implements delayed delete and cache-aside Fetch for values of type T
type Client[T any] struct {
rdb redis.UniversalClient
Options Options
group singleflight.Group
}
// NewClient returns a new rockscache client.
// for each key, the rockscache client stores a redis hash with the following fields:
// value: the value of the key
// lockUntil: the time at which the lock expires.
// lockOwner: the owner of the lock.
// if a caller queries the cache and no cached value exists, it locks the key before querying the data in the DB.
func NewClient[T any](rdb redis.UniversalClient, options Options) *Client[T] {
if options.Delay == 0 || options.LockExpire == 0 {
panic("cache options error: Delay and LockExpire should not be 0, you should call NewDefaultOptions() to get default options")
}
return &Client[T]{rdb: rdb, Options: options}
}
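// exampleProfile is a hypothetical value type used by the sketches in this file;
// real callers supply their own T.
type exampleProfile struct {
	ID   int64  `json:"id"`
	Name string `json:"name"`
}

// A minimal sketch of wiring up a typed client; the redis address is an
// illustrative assumption and any redis.UniversalClient works.
func exampleNewClient() *Client[exampleProfile] {
	rdb := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"})
	return NewClient[exampleProfile](rdb, NewDefaultOptions())
}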
// TagAsDeleted marks a key as deleted; the key will expire after the delay time.
func (c *Client[T]) TagAsDeleted(key string) error {
return c.TagAsDeleted2(c.rdb.Context(), key)
}
// TagAsDeleted2 marks a key as deleted using the given context; the key will expire after the delay time.
func (c *Client[T]) TagAsDeleted2(ctx context.Context, key string) error {
if c.Options.DisableCacheDelete {
return nil
}
debugf("deleting: key=%s", key)
luaFn := func(con redisConn) error {
_, err := callLua(ctx, con, ` -- delete
redis.call('HSET', KEYS[1], 'lockUntil', 0)
redis.call('HDEL', KEYS[1], 'lockOwner')
redis.call('EXPIRE', KEYS[1], ARGV[1])
`, []string{key}, []interface{}{int64(c.Options.Delay / time.Second)})
return err
}
if c.Options.WaitReplicas > 0 {
err := luaFn(c.rdb)
cmd := redis.NewCmd(ctx, "WAIT", c.Options.WaitReplicas, c.Options.WaitReplicasTimeout.Milliseconds()) // WAIT takes its timeout in milliseconds
if err == nil {
err = c.rdb.Process(ctx, cmd)
}
var replicas int
if err == nil {
replicas, err = cmd.Int()
}
if err == nil && replicas < c.Options.WaitReplicas {
err = fmt.Errorf("wait replicas %d failed. result replicas: %d", c.Options.WaitReplicas, replicas)
}
return err
}
return luaFn(c.rdb)
}
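// A minimal sketch of the write path TagAsDeleted is meant for: update the DB
// first, then tag the cache key so readers re-fetch. updateProfileInDB is a
// hypothetical stand-in for the real persistence call. Note that TagAsDeleted2
// does not apply Options.Prefix; this sketch assumes an empty Prefix.
func exampleUpdate(ctx context.Context, c *Client[exampleProfile], p exampleProfile) error {
	if err := updateProfileInDB(ctx, p); err != nil {
		return err
	}
	// delayed delete: the hash is kept for Options.Delay so in-flight readers are not broken
	return c.TagAsDeleted2(ctx, fmt.Sprintf("profile:%d", p.ID))
}

// updateProfileInDB is a stub standing in for the real database write.
func updateProfileInDB(ctx context.Context, p exampleProfile) error { return nil }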
// Fetch returns the value stored in the cache indexed by the key.
// If the key does not exist, it calls fn to get the result, stores it in the cache, and returns it.
func (c *Client[T]) Fetch(key string, expire time.Duration, fn func() (T, error)) (T, error) {
return c.Fetch2(c.rdb.Context(), key, expire, fn)
}
// Fetch2 returns the value stored in the cache indexed by the key, using the given context.
// If the key does not exist, it calls fn to get the result, stores it in the cache, and returns it.
func (c *Client[T]) Fetch2(ctx context.Context, key string, expire time.Duration, fn func() (T, error)) (t T, err error) {
key = c.Options.Prefix + key
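// the effective redis TTL is the requested expire minus Options.Delay and a random
// jitter (RandomExpireAdjustment), so keys written together do not expire together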
ex := expire - c.Options.Delay - time.Duration(rand.Float64()*c.Options.RandomExpireAdjustment*float64(expire))
v, err, _ := c.group.Do(key, func() (interface{}, error) {
if c.Options.DisableCacheRead {
return fn()
} else if c.Options.StrongConsistency {
return c.strongFetch(ctx, key, ex, fn)
}
return c.weakFetch(ctx, key, ex, fn)
})
if err != nil {
return
}
return v.(T), nil
}
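// A minimal sketch of a read through the cache. loadProfileFromDB is a
// hypothetical loader; returning ErrEmpty for a missing row makes the miss
// itself cached (for Options.EmptyExpire) instead of hitting the DB every time.
func exampleFetch(ctx context.Context, c *Client[exampleProfile], id int64) (exampleProfile, error) {
	return c.Fetch2(ctx, fmt.Sprintf("profile:%d", id), 10*time.Minute, func() (exampleProfile, error) {
		return loadProfileFromDB(ctx, id)
	})
}

// loadProfileFromDB is a stub standing in for the real database query.
func loadProfileFromDB(ctx context.Context, id int64) (exampleProfile, error) {
	if id <= 0 {
		return exampleProfile{}, ErrEmpty // cached as the empty placeholder
	}
	return exampleProfile{ID: id, Name: "player"}, nil
}

// luaGet returns a two-element slice: element 0 is the cached value (nil when
// absent) and element 1 is the lock state: "LOCKED" when this call acquired the
// lock, otherwise the stored lockUntil (or nil when the key is not locked).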
func (c *Client[T]) luaGet(ctx context.Context, key string, owner string) ([]interface{}, error) {
res, err := callLua(ctx, c.rdb, ` -- luaGet
local v = redis.call('HGET', KEYS[1], 'value')
local lu = redis.call('HGET', KEYS[1], 'lockUntil')
if lu ~= false and tonumber(lu) < tonumber(ARGV[1]) or lu == false and v == false then
redis.call('HSET', KEYS[1], 'lockUntil', ARGV[2])
redis.call('HSET', KEYS[1], 'lockOwner', ARGV[3])
return { v, 'LOCKED' }
end
return {v, lu}
`, []string{key}, []interface{}{now(), now() + int64(c.Options.LockExpire/time.Second), owner})
debugf("luaGet return: %v, %v", res, err)
if err != nil {
return nil, err
}
return res.([]interface{}), nil
}
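// luaSet stores the computed value only if this owner still holds the lock,
// then clears the lock fields and sets the key's TTL (expire is in seconds).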
func (c *Client[T]) luaSet(ctx context.Context, key string, value string, expire int, owner string) error {
_, err := callLua(ctx, c.rdb, `-- luaSet
local o = redis.call('HGET', KEYS[1], 'lockOwner')
if o ~= ARGV[2] then
return
end
redis.call('HSET', KEYS[1], 'value', ARGV[1])
redis.call('HDEL', KEYS[1], 'lockUntil')
redis.call('HDEL', KEYS[1], 'lockOwner')
redis.call('EXPIRE', KEYS[1], ARGV[3])
`, []string{key}, []interface{}{value, owner, expire})
return err
}
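// fetchNew computes the value with fn while holding the lock identified by
// owner, writes it (or the empty placeholder) back via luaSet, and releases the
// lock on failure so other callers do not have to wait for LockExpire.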
func (c *Client[T]) fetchNew(ctx context.Context, key string, expire time.Duration, owner string, fn func() (T, error)) (t T, err error) {
t, err = fn()
isEmpty := err == ErrEmpty
if err != nil && err != ErrEmpty {
_ = c.UnlockForUpdate(ctx, key, owner)
return
}
var data = ""
if isEmpty {
data = ErrEmptyPlaceholder
if c.Options.EmptyExpire == 0 { // if empty expire is 0, delete the key instead of caching the placeholder
err = c.rdb.Del(ctx, key).Err()
if err == nil {
err = ErrEmpty // still report the empty result to the caller
}
return
}
expire = c.Options.EmptyExpire
} else {
bytes, e := json.Marshal(t)
if e != nil {
_ = c.UnlockForUpdate(ctx, key, owner)
err = e
return
}
data = string(bytes)
}
err = c.luaSet(ctx, key, data, int(expire/time.Second), owner)
if err == nil && isEmpty {
err = ErrEmpty // do not hide a cache write failure behind ErrEmpty
}
return
}
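// weakFetch prefers availability: it waits only when the key is locked by
// another caller and holds no value, returns an existing (possibly stale) value
// immediately, and computes the data synchronously only on a true miss.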
func (c *Client[T]) weakFetch(ctx context.Context, key string, expire time.Duration, fn func() (T, error)) (t T, err error) {
debugf("weakFetch: key=%s", key)
owner := shortuuid.New()
r, err := c.luaGet(ctx, key, owner)
for err == nil && r[0] == nil && r[1].(string) != locked {
debugf("empty result for %s locked by other, so sleep %s", key, c.Options.LockSleep.String())
time.Sleep(c.Options.LockSleep)
r, err = c.luaGet(ctx, key, owner)
}
if err != nil {
return t, err
}
if r[0] == ErrEmptyPlaceholder {
err = ErrEmpty
return
}
if r[1] != locked { // normal value
err = json.Unmarshal([]byte(r[0].(string)), &t)
return
}
if r[0] == nil {
return c.fetchNew(ctx, key, expire, owner, fn)
}
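// At this point this caller holds the lock and a (possibly stale) value exists.
// With the asynchronous refresh below disabled, the stale value is returned
// as-is and is only recomputed after the hash itself expires.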
//go withRecover(func() {
// _, _ = c.fetchNew(ctx, key, expire, owner, fn)
//})
err = json.Unmarshal([]byte(r[0].(string)), &t)
return
}
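// strongFetch never returns stale data: it waits while any other caller holds
// the lock and recomputes synchronously whenever the value is missing or has
// been tagged as deleted.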
func (c *Client[T]) strongFetch(ctx context.Context, key string, expire time.Duration, fn func() (T, error)) (t T, err error) {
debugf("strongFetch: key=%s", key)
owner := shortuuid.New()
r, err := c.luaGet(ctx, key, owner)
for err == nil && r[1] != nil && r[1] != locked { // locked by other
debugf("locked by other, so sleep %s", c.Options.LockSleep)
time.Sleep(c.Options.LockSleep)
r, err = c.luaGet(ctx, key, owner)
}
if err != nil {
return
}
if r[0] == ErrEmptyPlaceholder {
err = ErrEmpty
return
}
if r[1] != locked { // normal value
err = json.Unmarshal([]byte(r[0].(string)), &t)
return
}
return c.fetchNew(ctx, key, expire, owner, fn)
}
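// A minimal sketch of opting into strong consistency: Fetch then blocks on
// concurrent recomputation instead of returning a stale value, trading latency
// for freshness.
func exampleStrongClient(rdb redis.UniversalClient) *Client[exampleProfile] {
	opts := NewDefaultOptions()
	opts.StrongConsistency = true
	return NewClient[exampleProfile](rdb, opts)
}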
// RawGet returns the value stored in the cache indexed by the key, regardless of whether the key is locked
func (c *Client[T]) RawGet(ctx context.Context, key string) (string, error) {
return c.rdb.HGet(ctx, key, "value").Result()
}
// RawSet sets the value stored in the cache indexed by the key, regardless of whether the key is locked
func (c *Client[T]) RawSet(ctx context.Context, key string, value string, expire time.Duration) error {
err := c.rdb.HSet(ctx, key, "value", value).Err()
if err == nil {
err = c.rdb.Expire(ctx, key, expire).Err()
}
return err
}
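// A minimal sketch of pre-warming a key with RawSet and reading it back with
// RawGet. Both bypass the lock fields and, unlike Fetch2, do not apply
// Options.Prefix; the key layout here is an illustrative assumption.
func exampleWarmUp(ctx context.Context, c *Client[exampleProfile], p exampleProfile) error {
	payload, err := json.Marshal(p)
	if err != nil {
		return err
	}
	key := fmt.Sprintf("profile:%d", p.ID)
	if err := c.RawSet(ctx, key, string(payload), 10*time.Minute); err != nil {
		return err
	}
	raw, err := c.RawGet(ctx, key)
	debugf("warmed %s: %s", key, raw)
	return err
}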
// LockForUpdate locks the key, used in very strict strong consistency mode
func (c *Client[T]) LockForUpdate(ctx context.Context, key string, owner string) error {
lockUntil := math.Pow10(10)
res, err := callLua(ctx, c.rdb, ` -- luaLock
local lu = redis.call('HGET', KEYS[1], 'lockUntil')
local lo = redis.call('HGET', KEYS[1], 'lockOwner')
if lu == false or tonumber(lu) < tonumber(ARGV[2]) or lo == ARGV[1] then
redis.call('HSET', KEYS[1], 'lockUntil', ARGV[2])
redis.call('HSET', KEYS[1], 'lockOwner', ARGV[1])
return 'LOCKED'
end
return lo
`, []string{key}, []interface{}{owner, lockUntil})
if err == nil && res != "LOCKED" {
return fmt.Errorf("%s has been locked by %s", key, res)
}
return err
}
// UnlockForUpdate unlocks the key, used in very strict strong consistency mode
func (c *Client[T]) UnlockForUpdate(ctx context.Context, key string, owner string) error {
_, err := callLua(ctx, c.rdb, ` -- luaUnlock
local lo = redis.call('HGET', KEYS[1], 'lockOwner')
if lo == ARGV[1] then
redis.call('HSET', KEYS[1], 'lockUntil', 0)
redis.call('HDEL', KEYS[1], 'lockOwner')
redis.call('EXPIRE', KEYS[1], ARGV[2])
end
`, []string{key}, []interface{}{owner, int64(c.Options.LockExpire / time.Second)})
return err
}
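// A minimal sketch of the "very strict" mode mentioned above: the caller locks
// the key across its own update and unlocks it afterwards. The update callback
// is a hypothetical stand-in for the real DB write; the owner token only needs
// to be unique per caller.
func exampleLockedUpdate(ctx context.Context, c *Client[exampleProfile], key string, update func() error) error {
	owner := shortuuid.New()
	if err := c.LockForUpdate(ctx, key, owner); err != nil {
		return err
	}
	// run the write while the key is locked: strong-consistency readers wait,
	// weak readers keep serving the existing value
	err := update()
	// UnlockForUpdate also resets the key's TTL to LockExpire
	if uerr := c.UnlockForUpdate(ctx, key, owner); err == nil {
		err = uerr
	}
	return err
}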