Mirror of https://github.com/openimsdk/open-im-server.git
Fix err (#2608)
* refactor: refactor workflows contents.
* add tool workflows.
* update field.
* fix: remove chat error.
* Fix err.
* fix error.
* remove cn comment.
* update workflows files.
* update infra config.
* move workflows.
* feat: update bot.
* fix: solve incorrect outdated msg get.
* update get docIDs logic.
* update
* update skip logic.
* fix
* update.
* fix: delay deleteObject func.
* remove unused content.
* update log type.
* feat: implement request batch count limit.
* update
* update
* feat: add rocksTimeout
* feat: wrap logs
* feat: add logs
* feat: listen config
* feat: enable listen TIME_WAIT port
* feat: add logs
* feat: cache batch
* chore: enable fullUserCache
* feat: push rpc num
* feat: push err
* feat: with operationID
* feat: sleep
* feat: change 1s
* feat: change log
* feat: implement GetBatch in rpcCache.
* feat: print getOnline cost
* feat: change log
* feat: change kafka and push config
* feat: del interface
* feat: fix err
* feat: change config
* feat: go mod
* feat: change config
* feat: change config
* feat: add sleep in push
* feat: warn logs
* feat: logs
* feat: logs
* feat: change port
* feat: start config
* feat: remove port reuse
* feat: prometheus config
* feat: prometheus config
* feat: prometheus config
* feat: add long time send msg to grafana
* feat: init
* feat: init
* feat: implement offline push.
* feat: batch get user online
* feat: implement batch Push split
* update go mod
* Revert "feat: change port"
This reverts commit 06d5e944
* feat: change port
* feat: change config
* feat: implement kafka producer and consumer.
* update format.
* add PushMQ log.
* feat: get all online users and init push
* feat: lock in online cache
* feat: config
* fix: init online status
* fix: add logs
* fix: userIDs
* fix: add logs
* feat: update Handler logic.
* update MQ logic.
* update
* update
* fix: method name
* fix: update OfflinePushConsumerHandler.
* fix: prommetrics
* fix: add logs
* fix: ctx
* fix: log
* fix: config
* feat: change port
* fix: atomic online cache status
---------
Co-authored-by: Monet Lee <monet_lee@163.com>
@@ -224,6 +224,7 @@ type Push struct {
 		BadgeCount bool `mapstructure:"badgeCount"`
 		Production bool `mapstructure:"production"`
 	} `mapstructure:"iosPush"`
+	FullUserCache bool `mapstructure:"fullUserCache"`
 }

 type Auth struct {
@@ -54,15 +54,11 @@ func Start[T any](ctx context.Context, discovery *config.Discovery, prometheusCo
 	log.CInfo(ctx, "RPC server is initializing", "rpcRegisterName", rpcRegisterName, "rpcPort", rpcPort,
 		"prometheusPorts", prometheusConfig.Ports)
 	rpcTcpAddr := net.JoinHostPort(network.GetListenIP(listenIP), strconv.Itoa(rpcPort))

 	listener, err := net.Listen(
 		"tcp",
 		rpcTcpAddr,
 	)
 	if err != nil {
 		return errs.WrapMsg(err, "listen err", "rpcTcpAddr", rpcTcpAddr)
 	}

 	defer listener.Close()
 	client, err := kdisc.NewDiscoveryRegister(discovery, share)
 	if err != nil {
 		return err
+8 -1
@@ -1,6 +1,9 @@
 package cachekey

-import "time"
+import (
+	"strings"
+	"time"
+)

 const (
 	OnlineKey = "ONLINE:"
@@ -11,3 +14,7 @@ const (
 func GetOnlineKey(userID string) string {
 	return OnlineKey + userID
 }
+
+func GetOnlineKeyUserID(key string) string {
+	return strings.TrimPrefix(key, OnlineKey)
+}
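The two helpers are inverses: GetOnlineKey prefixes a user ID with ONLINE: to build the Redis key, and GetOnlineKeyUserID strips that prefix so a key found by SCAN can be mapped back to its user. A quick sketch (the user ID is made up):

	key := cachekey.GetOnlineKey("user123")    // "ONLINE:user123"
	userID := cachekey.GetOnlineKeyUserID(key) // "user123"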
+1
@@ -5,4 +5,5 @@ import "context"

 type OnlineCache interface {
 	GetOnline(ctx context.Context, userID string) ([]int32, error)
 	SetUserOnline(ctx context.Context, userID string, online, offline []int32) error
+	GetAllOnlineUsers(ctx context.Context, cursor uint64) (map[string][]int32, uint64, error)
 }
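GetAllOnlineUsers is cursor-paged in the style of Redis SCAN: the caller feeds the returned cursor back in until it comes back as zero. A minimal sketch of draining the interface; the helper name and the local interface restatement are illustrative, not part of the change:

	// onlinePager restates just the method we need from the interface above.
	type onlinePager interface {
		GetAllOnlineUsers(ctx context.Context, cursor uint64) (map[string][]int32, uint64, error)
	}

	func collectAllOnline(ctx context.Context, c onlinePager) (map[string][]int32, error) {
		all := make(map[string][]int32)
		var cursor uint64
		for {
			users, next, err := c.GetAllOnlineUsers(ctx, cursor)
			if err != nil {
				return nil, err
			}
			for userID, platformIDs := range users {
				all[userID] = platformIDs
			}
			if next == 0 { // SCAN convention: a zero cursor ends the iteration
				return all, nil
			}
			cursor = next
		}
	}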
+4 -2
@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/json"
 	"github.com/dtm-labs/rockscache"
+	"github.com/openimsdk/tools/errs"
 	"github.com/openimsdk/tools/log"
 	"github.com/redis/go-redis/v9"
 	"golang.org/x/sync/singleflight"
@@ -65,6 +66,7 @@ func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscac
 			}
 			bs, err := json.Marshal(value)
 			if err != nil {
+				log.ZError(ctx, "marshal failed", err)
 				return nil, err
 			}
 			cacheIndex[index] = string(bs)
@@ -72,7 +74,7 @@ func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscac
 		return cacheIndex, nil
 	})
 	if err != nil {
-		return nil, err
+		return nil, errs.WrapMsg(err, "FetchBatch2 failed")
 	}
 	for index, data := range indexCache {
 		if data == "" {
@@ -80,7 +82,7 @@ func batchGetCache2[K comparable, V any](ctx context.Context, rcClient *rockscac
 		}
 		var value V
 		if err := json.Unmarshal([]byte(data), &value); err != nil {
-			return nil, err
+			return nil, errs.WrapMsg(err, "Unmarshal failed")
 		}
 		if cb, ok := any(&value).(BatchCacheCallback[K]); ok {
 			cb.BatchCache(keyId[keys[index]])
@@ -28,6 +28,10 @@ import (
 	"time"
 )

+const (
+	rocksCacheTimeout = 11 * time.Second
+)
+
 // BatchDeleterRedis is a concrete implementation of the BatchDeleter interface based on Redis and RocksCache.
 type BatchDeleterRedis struct {
 	redisClient redis.UniversalClient
@@ -106,6 +110,8 @@ func (c *BatchDeleterRedis) AddKeys(keys ...string) {
 // GetRocksCacheOptions returns the default configuration options for RocksCache.
 func GetRocksCacheOptions() *rockscache.Options {
 	opts := rockscache.NewDefaultOptions()
+	opts.LockExpire = rocksCacheTimeout
+	opts.WaitReplicasTimeout = rocksCacheTimeout
 	opts.StrongConsistency = true
 	opts.RandomExpireAdjustment = 0.2
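These options are consumed when constructing the rockscache client; per rockscache's documentation, LockExpire caps how long a fetch lock survives if the querying process dies. A minimal wiring sketch (the Redis address is an assumption):

	// Hypothetical wiring: hand the tuned options to a rockscache client.
	rdb := redis.NewUniversalClient(&redis.UniversalOptions{Addrs: []string{"127.0.0.1:6379"}})
	rcClient := rockscache.NewClient(rdb, *GetRocksCacheOptions())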
+32
@@ -2,8 +2,10 @@ package redis

 import (
 	"context"
+	"fmt"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache"
 	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
+	"github.com/openimsdk/protocol/constant"
 	"github.com/openimsdk/tools/errs"
 	"github.com/openimsdk/tools/log"
 	"github.com/redis/go-redis/v9"
@@ -49,6 +51,36 @@ func (s *userOnline) GetOnline(ctx context.Context, userID string) ([]int32, err
 	return platformIDs, nil
 }

+func (s *userOnline) GetAllOnlineUsers(ctx context.Context, cursor uint64) (map[string][]int32, uint64, error) {
+	result := make(map[string][]int32)
+
+	keys, nextCursor, err := s.rdb.Scan(ctx, cursor, fmt.Sprintf("%s*", cachekey.OnlineKey), constant.ParamMaxLength).Result()
+	if err != nil {
+		return nil, 0, err
+	}
+
+	for _, key := range keys {
+		userID := cachekey.GetOnlineKeyUserID(key)
+		strValues, err := s.rdb.ZRange(ctx, key, 0, -1).Result()
+		if err != nil {
+			return nil, 0, err
+		}
+
+		values := make([]int32, 0, len(strValues))
+		for _, value := range strValues {
+			intValue, err := strconv.Atoi(value)
+			if err != nil {
+				return nil, 0, errs.Wrap(err)
+			}
+			values = append(values, int32(intValue))
+		}
+
+		result[userID] = values
+	}
+
+	return result, nextCursor, nil
+}
+
 func (s *userOnline) SetUserOnline(ctx context.Context, userID string, online, offline []int32) error {
 	script := `
	local key = KEYS[1]
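The scan leans on the storage layout visible above: one sorted set per user under ONLINE:<userID> whose members are platform IDs (ZRange 0 -1 returns members and ignores scores). A hedged go-redis sketch of seeding such a key for local testing; the user ID, member values, and scores here are made up:

	// Hypothetical test fixture: mark user123 online on two platforms.
	err := rdb.ZAdd(ctx, "ONLINE:user123",
		redis.Z{Score: 0, Member: "5"},
		redis.Z{Score: 0, Member: "9"},
	).Err()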
@@ -20,7 +20,9 @@ type EvictCallback[K comparable, V any] simplelru.EvictCallback[K, V]

 type LRU[K comparable, V any] interface {
 	Get(key K, fetch func() (V, error)) (V, error)
+	Set(key K, value V)
 	SetHas(key K, value V) bool
+	GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error)
 	Del(key K) bool
 	Stop()
 }
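The new GetBatch mirrors Get but for a whole key set: implementations serve fresh entries from cache and pass only the remaining keys to fetch (the slot wrapper below does this per slot), merging the fetched values into the returned map. A hedged usage sketch; the cache variable and the loader are assumptions:

	res, err := cache.GetBatch([]string{"u1", "u2", "u3"}, func(missing []string) (map[string][]int32, error) {
		out := make(map[string][]int32, len(missing))
		for _, k := range missing {
			out[k] = loadPlatformIDs(k) // hypothetical backend loader
		}
		return out, nil
	})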
@@ -51,6 +51,11 @@ type ExpirationLRU[K comparable, V any] struct {
 	target Target
 }

+func (x *ExpirationLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error) {
+	//TODO implement me
+	panic("implement me")
+}
+
 func (x *ExpirationLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) {
 	x.lock.Lock()
 	v, ok := x.core.Get(key)
@@ -99,5 +104,11 @@ func (x *ExpirationLRU[K, V]) SetHas(key K, value V) bool {
 	return false
 }

+func (x *ExpirationLRU[K, V]) Set(key K, value V) {
+	x.lock.Lock()
+	defer x.lock.Unlock()
+	x.core.Add(key, &expirationLruItem[V]{value: value})
+}
+
 func (x *ExpirationLRU[K, V]) Stop() {
 }
@@ -88,18 +88,76 @@ func (x *LayLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) {
 	return v.value, v.err
 }

-//func (x *LayLRU[K, V]) Set(key K, value V) {
-//	x.lock.Lock()
-//	x.core.Add(key, &layLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()})
-//	x.lock.Unlock()
-//}
-//
+func (x *LayLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error) {
+	var (
+		err  error
+		once sync.Once
+	)
+
+	x.lock.Lock()
+	res := make(map[K]V)
+	queries := make([]K, 0)
+	setVs := make(map[K]*layLruItem[V])
+	for _, key := range keys {
+		v, ok := x.core.Get(key)
+		if ok {
+			x.lock.Unlock()
+			v.lock.Lock()
+			expires, value, err1 := v.expires, v.value, v.err
+			if expires != 0 && expires > time.Now().UnixMilli() {
+				v.lock.Unlock()
+				x.target.IncrGetHit()
+				res[key] = value
+				if err1 != nil {
+					once.Do(func() {
+						err = err1
+					})
+				}
+				continue
+			}
+		}
+		queries = append(queries, key)
+		x.lock.Unlock()
+	}
+	values, err1 := fetch(queries)
+	if err1 != nil {
+		once.Do(func() {
+			err = err1
+		})
+	}
+	for key, val := range values {
+		v := &layLruItem[V]{}
+		v.value = val
+
+		if err == nil {
+			v.expires = time.Now().Add(x.successTTL).UnixMilli()
+			x.target.IncrGetSuccess()
+		} else {
+			v.expires = time.Now().Add(x.failedTTL).UnixMilli()
+			x.target.IncrGetFailed()
+		}
+		setVs[key] = v
+		x.lock.Lock()
+		x.core.Add(key, v)
+		x.lock.Unlock()
+		res[key] = val
+	}
+
+	return res, err
+}
+
 //func (x *LayLRU[K, V]) Has(key K) bool {
 //	x.lock.Lock()
 //	defer x.lock.Unlock()
 //	return x.core.Contains(key)
 //}

+func (x *LayLRU[K, V]) Set(key K, value V) {
+	x.lock.Lock()
+	defer x.lock.Unlock()
+	x.core.Add(key, &layLruItem[V]{value: value, expires: time.Now().Add(x.successTTL).UnixMilli()})
+}
+
 func (x *LayLRU[K, V]) SetHas(key K, value V) bool {
 	x.lock.Lock()
 	defer x.lock.Unlock()
@@ -32,6 +32,29 @@ type slotLRU[K comparable, V any] struct {
 	hash func(k K) uint64
 }

+func (x *slotLRU[K, V]) GetBatch(keys []K, fetch func(keys []K) (map[K]V, error)) (map[K]V, error) {
+	var (
+		slotKeys = make(map[uint64][]K)
+		vs       = make(map[K]V)
+	)
+
+	for _, k := range keys {
+		index := x.getIndex(k)
+		slotKeys[index] = append(slotKeys[index], k)
+	}
+
+	for k, v := range slotKeys {
+		batches, err := x.slots[k].GetBatch(v, fetch)
+		if err != nil {
+			return nil, err
+		}
+		for key, value := range batches {
+			vs[key] = value
+		}
+	}
+	return vs, nil
+}
+
 func (x *slotLRU[K, V]) getIndex(k K) uint64 {
 	return x.hash(k) % x.n
 }
@@ -40,6 +63,10 @@ func (x *slotLRU[K, V]) Get(key K, fetch func() (V, error)) (V, error) {
 	return x.slots[x.getIndex(key)].Get(key, fetch)
 }

+func (x *slotLRU[K, V]) Set(key K, value V) {
+	x.slots[x.getIndex(key)].Set(key, value)
+}
+
 func (x *slotLRU[K, V]) SetHas(key K, value V) bool {
 	return x.slots[x.getIndex(key)].SetHas(key, value)
 }
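The slot wrapper shards a batch the same way it shards single keys: group by hash(key) % n, run GetBatch on each underlying LRU, and merge the results. A toy illustration of the grouping step (the hash is a stand-in for the real one, e.g. localcache.LRUStringHash):

	package main

	import "fmt"

	func main() {
		hash := func(k string) uint64 { // stand-in hash
			var h uint64
			for _, c := range k {
				h = h*31 + uint64(c)
			}
			return h
		}
		const n = 4
		slotKeys := make(map[uint64][]string)
		for _, k := range []string{"u1", "u2", "u3", "u4"} {
			idx := hash(k) % n
			slotKeys[idx] = append(slotKeys[idx], k)
		}
		fmt.Println(slotKeys) // each sub-batch goes to slots[idx].GetBatch with the same fetch
	}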
+243 -29
@@ -2,60 +2,197 @@ package rpccache

 import (
 	"context"
+	"fmt"
+	"github.com/openimsdk/protocol/constant"
+	"github.com/openimsdk/protocol/user"
+	"math/rand"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"

 	"github.com/openimsdk/open-im-server/v3/pkg/common/storage/cache/cachekey"
 	"github.com/openimsdk/open-im-server/v3/pkg/localcache"
 	"github.com/openimsdk/open-im-server/v3/pkg/localcache/lru"
 	"github.com/openimsdk/open-im-server/v3/pkg/rpcclient"
 	"github.com/openimsdk/open-im-server/v3/pkg/util/useronline"
+	"github.com/openimsdk/tools/db/cacheutil"
 	"github.com/openimsdk/tools/log"
 	"github.com/openimsdk/tools/mcontext"
 	"github.com/redis/go-redis/v9"
-	"math/rand"
-	"strconv"
-	"time"
 )

-func NewOnlineCache(user rpcclient.UserRpcClient, group *GroupLocalCache, rdb redis.UniversalClient, fn func(ctx context.Context, userID string, platformIDs []int32)) *OnlineCache {
+func NewOnlineCache(user rpcclient.UserRpcClient, group *GroupLocalCache, rdb redis.UniversalClient, fullUserCache bool, fn func(ctx context.Context, userID string, platformIDs []int32)) (*OnlineCache, error) {
+	l := &sync.Mutex{}
 	x := &OnlineCache{
-		user:  user,
-		group: group,
-		local: lru.NewSlotLRU(1024, localcache.LRUStringHash, func() lru.LRU[string, []int32] {
-			return lru.NewLayLRU[string, []int32](2048, cachekey.OnlineExpire/2, time.Second*3, localcache.EmptyTarget{}, func(key string, value []int32) {})
-		}),
+		user:          user,
+		group:         group,
+		fullUserCache: fullUserCache,
+		Lock:          l,
+		Cond:          sync.NewCond(l),
 	}
+
+	ctx := mcontext.SetOperationID(context.TODO(), strconv.FormatInt(time.Now().UnixNano()+int64(rand.Uint32()), 10))
+
+	switch x.fullUserCache {
+	case true:
+		log.ZDebug(ctx, "fullUserCache is true")
+		x.mapCache = cacheutil.NewCache[string, []int32]()
+		go func() {
+			if err := x.initUsersOnlineStatus(ctx); err != nil {
+				log.ZError(ctx, "initUsersOnlineStatus failed", err)
+			}
+		}()
+	case false:
+		log.ZDebug(ctx, "fullUserCache is false")
+		x.lruCache = lru.NewSlotLRU(1024, localcache.LRUStringHash, func() lru.LRU[string, []int32] {
+			return lru.NewLayLRU[string, []int32](2048, cachekey.OnlineExpire/2, time.Second*3, localcache.EmptyTarget{}, func(key string, value []int32) {})
+		})
+		x.CurrentPhase.Store(DoSubscribeOver)
+		x.Cond.Broadcast()
+	}
+
 	go func() {
-		ctx := mcontext.SetOperationID(context.Background(), cachekey.OnlineChannel+strconv.FormatUint(rand.Uint64(), 10))
-		for message := range rdb.Subscribe(ctx, cachekey.OnlineChannel).Channel() {
-			userID, platformIDs, err := useronline.ParseUserOnlineStatus(message.Payload)
-			if err != nil {
-				log.ZError(ctx, "OnlineCache setUserOnline redis subscribe parseUserOnlineStatus", err, "payload", message.Payload, "channel", message.Channel)
-				continue
-			}
-			storageCache := x.setUserOnline(userID, platformIDs)
-			log.ZDebug(ctx, "OnlineCache setUserOnline", "userID", userID, "platformIDs", platformIDs, "payload", message.Payload, "storageCache", storageCache)
-			if fn != nil {
-				fn(ctx, userID, platformIDs)
-			}
-		}
+		x.doSubscribe(ctx, rdb, fn)
 	}()
-	return x
+	return x, nil
 }

+const (
+	Begin uint32 = iota
+	DoOnlineStatusOver
+	DoSubscribeOver
+)
+
 type OnlineCache struct {
 	user  rpcclient.UserRpcClient
 	group *GroupLocalCache
-	local lru.LRU[string, []int32]
+
+	// fullUserCache if enabled, caches the online status of all users using mapCache;
+	// otherwise, only a portion of users' online statuses (regardless of whether they are online) will be cached using lruCache.
+	fullUserCache bool
+
+	lruCache lru.LRU[string, []int32]
+	mapCache *cacheutil.Cache[string, []int32]
+
+	Lock         *sync.Mutex
+	Cond         *sync.Cond
+	CurrentPhase atomic.Uint32
 }
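The constructor now takes the mode explicitly and returns an error. A hedged sketch of a call site; the client, group cache, redis handle, and config accessor are assumed wiring, not code from this change:

	onlineCache, err := rpccache.NewOnlineCache(userClient, groupLocalCache, rdb, pushConf.FullUserCache, nil)
	if err != nil {
		return err
	}
	// fullUserCache=true  => mapCache holds every online user (bulk-loaded, higher memory)
	// fullUserCache=false => lruCache holds a bounded subset, filled on demand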
+func (o *OnlineCache) initUsersOnlineStatus(ctx context.Context) (err error) {
+	log.ZDebug(ctx, "init users online status begin")
+
+	var (
+		totalSet      atomic.Int64
+		maxTries      = 5
+		retryInterval = time.Second * 5
+
+		resp *user.GetAllOnlineUsersResp
+	)
+
+	defer func(t time.Time) {
+		log.ZInfo(ctx, "init users online status end", "cost", time.Since(t), "totalSet", totalSet.Load())
+		o.CurrentPhase.Store(DoOnlineStatusOver)
+		o.Cond.Broadcast()
+	}(time.Now())
+
+	retryOperation := func(operation func() error, operationName string) error {
+		for i := 0; i < maxTries; i++ {
+			if err = operation(); err != nil {
+				log.ZWarn(ctx, fmt.Sprintf("initUsersOnlineStatus: %s failed", operationName), err)
+				time.Sleep(retryInterval)
+			} else {
+				return nil
+			}
+		}
+		return err
+	}
+
+	cursor := uint64(0)
+	for resp == nil || resp.NextCursor != 0 {
+		if err = retryOperation(func() error {
+			resp, err = o.user.GetAllOnlineUsers(ctx, cursor)
+			if err != nil {
+				return err
+			}
+
+			for _, u := range resp.StatusList {
+				if u.Status == constant.Online {
+					o.setUserOnline(u.UserID, u.PlatformIDs)
+				}
+				totalSet.Add(1)
+			}
+			cursor = resp.NextCursor
+			return nil
+		}, "getAllOnlineUsers"); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+func (o *OnlineCache) doSubscribe(ctx context.Context, rdb redis.UniversalClient, fn func(ctx context.Context, userID string, platformIDs []int32)) {
+	o.Lock.Lock()
+	ch := rdb.Subscribe(ctx, cachekey.OnlineChannel).Channel()
+	for o.CurrentPhase.Load() < DoOnlineStatusOver {
+		o.Cond.Wait()
+	}
+	o.Lock.Unlock()
+	log.ZInfo(ctx, "begin doSubscribe")
+
+	doMessage := func(message *redis.Message) {
+		userID, platformIDs, err := useronline.ParseUserOnlineStatus(message.Payload)
+		if err != nil {
+			log.ZError(ctx, "OnlineCache setHasUserOnline redis subscribe parseUserOnlineStatus", err, "payload", message.Payload, "channel", message.Channel)
+			return
+		}
+		log.ZDebug(ctx, fmt.Sprintf("get subscribe %s message", cachekey.OnlineChannel), "useID", userID, "platformIDs", platformIDs)
+		switch o.fullUserCache {
+		case true:
+			if len(platformIDs) == 0 {
+				// offline
+				o.mapCache.Delete(userID)
+			} else {
+				o.mapCache.Store(userID, platformIDs)
+			}
+		case false:
+			storageCache := o.setHasUserOnline(userID, platformIDs)
+			log.ZDebug(ctx, "OnlineCache setHasUserOnline", "userID", userID, "platformIDs", platformIDs, "payload", message.Payload, "storageCache", storageCache)
+			if fn != nil {
+				fn(ctx, userID, platformIDs)
+			}
+		}
+	}
+
+	if o.CurrentPhase.Load() == DoOnlineStatusOver {
+		for done := false; !done; {
+			select {
+			case message := <-ch:
+				doMessage(message)
+			default:
+				o.CurrentPhase.Store(DoSubscribeOver)
+				o.Cond.Broadcast()
+				done = true
+			}
+		}
+	}
+
+	for message := range ch {
+		doMessage(message)
+	}
+}
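The phase constants and the Cond implement a small startup handshake: the subscriber waits until the bulk load has stored DoOnlineStatusOver, drains whatever queued on the channel meanwhile, then publishes DoSubscribeOver so readers can trust the map. A stripped-down sketch of the same pattern; the helper functions are stand-ins, not code from this change:

	package main

	import (
		"sync"
		"sync/atomic"
	)

	const (
		Begin uint32 = iota
		DoOnlineStatusOver
		DoSubscribeOver
	)

	var (
		mu    sync.Mutex
		cond  = sync.NewCond(&mu)
		phase atomic.Uint32
	)

	func bulkLoad()     {} // stand-in for initUsersOnlineStatus's snapshot load
	func drainBacklog() {} // stand-in for the non-blocking select over ch

	func main() {
		go func() { // initializer
			bulkLoad()
			mu.Lock()
			phase.Store(DoOnlineStatusOver)
			mu.Unlock()
			cond.Broadcast()
		}()

		// subscriber: wait for the snapshot, drain the backlog, then open for business
		mu.Lock()
		for phase.Load() < DoOnlineStatusOver {
			cond.Wait()
		}
		mu.Unlock()
		drainBacklog()
		phase.Store(DoSubscribeOver)
		cond.Broadcast()
	}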
 func (o *OnlineCache) getUserOnlinePlatform(ctx context.Context, userID string) ([]int32, error) {
-	platformIDs, err := o.local.Get(userID, func() ([]int32, error) {
+	platformIDs, err := o.lruCache.Get(userID, func() ([]int32, error) {
 		return o.user.GetUserOnlinePlatform(ctx, userID)
 	})
 	if err != nil {
 		log.ZError(ctx, "OnlineCache GetUserOnlinePlatform", err, "userID", userID)
 		return nil, err
 	}
-	log.ZDebug(ctx, "OnlineCache GetUserOnlinePlatform", "userID", userID, "platformIDs", platformIDs)
+	//log.ZDebug(ctx, "OnlineCache GetUserOnlinePlatform", "userID", userID, "platformIDs", platformIDs)
 	return platformIDs, nil
 }

@@ -69,6 +206,16 @@ func (o *OnlineCache) GetUserOnlinePlatform(ctx context.Context, userID string)
 	return platformIDs, nil
 }

+// func (o *OnlineCache) GetUserOnlinePlatformBatch(ctx context.Context, userIDs []string) (map[string]int32, error) {
+// 	platformIDs, err := o.getUserOnlinePlatform(ctx, userIDs)
+// 	if err != nil {
+// 		return nil, err
+// 	}
+// 	tmp := make([]int32, len(platformIDs))
+// 	copy(tmp, platformIDs)
+// 	return platformIDs, nil
+// }
+
 func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, error) {
 	platformIDs, err := o.getUserOnlinePlatform(ctx, userID)
 	if err != nil {
@@ -77,10 +224,68 @@ func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, e
 	return len(platformIDs) > 0, nil
 }

+func (o *OnlineCache) getUserOnlinePlatformBatch(ctx context.Context, userIDs []string) (map[string][]int32, error) {
+	platformIDsMap, err := o.lruCache.GetBatch(userIDs, func(missingUsers []string) (map[string][]int32, error) {
+		platformIDsMap := make(map[string][]int32)
+
+		usersStatus, err := o.user.GetUsersOnlinePlatform(ctx, missingUsers)
+		if err != nil {
+			return nil, err
+		}
+
+		for _, u := range usersStatus {
+			platformIDsMap[u.UserID] = u.PlatformIDs
+		}
+
+		return platformIDsMap, nil
+	})
+	if err != nil {
+		log.ZError(ctx, "OnlineCache GetUserOnlinePlatform", err, "userID", userIDs)
+		return nil, err
+	}
+	return platformIDsMap, nil
+}
+func (o *OnlineCache) GetUsersOnline(ctx context.Context, userIDs []string) ([]string, []string, error) {
+	t := time.Now()
+
+	var (
+		onlineUserIDs  = make([]string, 0, len(userIDs))
+		offlineUserIDs = make([]string, 0, len(userIDs))
+	)
+
+	switch o.fullUserCache {
+	case true:
+		for _, userID := range userIDs {
+			if _, ok := o.mapCache.Load(userID); ok {
+				onlineUserIDs = append(onlineUserIDs, userID)
+			} else {
+				offlineUserIDs = append(offlineUserIDs, userID)
+			}
+		}
+	case false:
+		userOnlineMap, err := o.getUserOnlinePlatformBatch(ctx, userIDs)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		for key, value := range userOnlineMap {
+			if len(value) > 0 {
+				onlineUserIDs = append(onlineUserIDs, key)
+			} else {
+				offlineUserIDs = append(offlineUserIDs, key)
+			}
+		}
+	}
+
+	log.ZInfo(ctx, "get users online", "online users length", len(userIDs), "offline users length", len(offlineUserIDs), "cost", time.Since(t))
+	return userIDs, offlineUserIDs, nil
+}
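A hedged sketch of how the push path might consume this online/offline split; the delivery helpers are assumptions, not code from this change:

	onlineUserIDs, offlineUserIDs, err := onlineCache.GetUsersOnline(ctx, memberIDs)
	if err != nil {
		return err
	}
	pushToConnectedGateways(ctx, onlineUserIDs, msg) // hypothetical online delivery
	enqueueOfflinePush(ctx, offlineUserIDs, msg)     // hypothetical offline push (APNs/FCM)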
+//func (o *OnlineCache) GetUsersOnline(ctx context.Context, userIDs []string) ([]string, error) {
+//	onlineUserIDs := make([]string, 0, len(userIDs))
+//	for _, userID := range userIDs {
+//		online, err := o.GetUserOnline(ctx, userID)
+//		if err != nil {
+//			return nil, err
+//		}
@@ -111,6 +316,15 @@ func (o *OnlineCache) GetUserOnline(ctx context.Context, userID string) (bool, e
+//	return onlineUserIDs, nil
+//}

-func (o *OnlineCache) setUserOnline(userID string, platformIDs []int32) bool {
-	return o.local.SetHas(userID, platformIDs)
+func (o *OnlineCache) setUserOnline(userID string, platformIDs []int32) {
+	switch o.fullUserCache {
+	case true:
+		o.mapCache.Store(userID, platformIDs)
+	case false:
+		o.lruCache.Set(userID, platformIDs)
+	}
 }
+
+func (o *OnlineCache) setHasUserOnline(userID string, platformIDs []int32) bool {
+	return o.lruCache.SetHas(userID, platformIDs)
+}
@@ -169,6 +169,15 @@ func (u *UserRpcClient) Access(ctx context.Context, ownerUserID string) error {
 	return authverify.CheckAccessV3(ctx, ownerUserID, u.imAdminUserID)
 }

+// GetAllUserID retrieves all user IDs with pagination options.
+func (u *UserRpcClient) GetAllUserID(ctx context.Context, pageNumber, showNumber int32) (*user.GetAllUserIDResp, error) {
+	resp, err := u.Client.GetAllUserID(ctx, &user.GetAllUserIDReq{Pagination: &sdkws.RequestPagination{PageNumber: pageNumber, ShowNumber: showNumber}})
+	if err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
+
 // GetAllUserIDs retrieves all user IDs with pagination options.
 func (u *UserRpcClient) GetAllUserIDs(ctx context.Context, pageNumber, showNumber int32) ([]string, error) {
 	resp, err := u.Client.GetAllUserID(ctx, &user.GetAllUserIDReq{Pagination: &sdkws.RequestPagination{PageNumber: pageNumber, ShowNumber: showNumber}})
@@ -215,3 +224,7 @@ func (u *UserRpcClient) GetUserOnlinePlatform(ctx context.Context, userID string
 	}
 	return resp[0].PlatformIDs, nil
 }
+
+func (u *UserRpcClient) GetAllOnlineUsers(ctx context.Context, cursor uint64) (*user.GetAllOnlineUsersResp, error) {
+	return u.Client.GetAllOnlineUsers(ctx, &user.GetAllOnlineUsersReq{Cursor: cursor})
+}
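For completeness, a hedged sketch of paging through all registered users with the GetAllUserIDs wrapper; 1-based page numbering and a page size of 500 are assumptions:

	var all []string
	for page := int32(1); ; page++ {
		ids, err := userClient.GetAllUserIDs(ctx, page, 500)
		if err != nil {
			return err
		}
		if len(ids) == 0 { // an empty page ends the pagination
			break
		}
		all = append(all, ids...)
	}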