mirror of https://github.com/openimsdk/open-im-server.git (synced 2026-04-28 06:19:20 +08:00)
feat: add long time push msg in prometheus (#2584)
* feat: add long time push msg in prometheus
* fix: log print
* fix: go mod
* fix: log msg
* fix: log init
* feat: push msg
* feat: go mod, remove cgo package
* feat: remove error log
* feat: test dummy push
* feat: redis pool config
* feat: push to kafka log
@@ -129,10 +129,11 @@ func (r *RootCmd) applyOptions(opts ...func(*CmdOpts)) *CmdOpts {
 }
 
 func (r *RootCmd) initializeLogger(cmdOpts *CmdOpts) error {
-	err := log.InitFromConfig(
+	err := log.InitLoggerFromConfig(
 		cmdOpts.loggerPrefixName,
 		r.processName,
+		"", "",
 		r.log.RemainLogLevel,
 		r.log.IsStdout,
 		r.log.IsJson,
@@ -336,7 +336,8 @@ type Redis struct {
 	Password    string `mapstructure:"password"`
 	ClusterMode bool   `mapstructure:"clusterMode"`
 	DB          int    `mapstructure:"storage"`
-	MaxRetry    int    `mapstructure:"MaxRetry"`
+	MaxRetry    int    `mapstructure:"maxRetry"`
+	PoolSize    int    `mapstructure:"poolSize"`
 }
 
 type BeforeConfig struct {
@@ -474,6 +475,7 @@ func (r *Redis) Build() *redisutil.Config {
 		Password: r.Password,
 		DB:       r.DB,
 		MaxRetry: r.MaxRetry,
+		PoolSize: r.PoolSize,
 	}
 }
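For context on the two struct-tag changes above: the lower-cased maxRetry tag and the new poolSize field are both populated by mapstructure-style decoding of the parsed config, which Build() then forwards into redisutil.Config. Below is a minimal sketch, assuming the values arrive as a plain map (as a YAML loader would hand them over) and using github.com/mitchellh/mapstructure directly; the field list is trimmed to what the diff shows and the values are illustrative.

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Redis mirrors the fields of the config struct shown in the diff above (trimmed).
type Redis struct {
	Password    string `mapstructure:"password"`
	ClusterMode bool   `mapstructure:"clusterMode"`
	DB          int    `mapstructure:"storage"`
	MaxRetry    int    `mapstructure:"maxRetry"`
	PoolSize    int    `mapstructure:"poolSize"`
}

func main() {
	// Values as a config loader might hand them over after parsing YAML.
	raw := map[string]any{
		"password": "example-password",
		"storage":  0,
		"maxRetry": 10,
		"poolSize": 100,
	}

	var cfg Redis
	if err := mapstructure.Decode(raw, &cfg); err != nil {
		panic(err)
	}
	// cfg.PoolSize now carries the new poolSize setting that Build()
	// forwards into redisutil.Config in the hunk above.
	fmt.Printf("%+v\n", cfg)
}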
@@ -23,4 +23,8 @@ var (
 		Name: "msg_offline_push_failed_total",
 		Help: "The number of msg failed offline pushed",
 	})
+	MsgLoneTimePushCounter = prometheus.NewCounter(prometheus.CounterOpts{
+		Name: "msg_long_time_push_total",
+		Help: "The number of messages with a push time exceeding 10 seconds",
+	})
 )
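To illustrate what the new counter measures: below is a minimal sketch, not the repository's actual push path, of incrementing MsgLoneTimePushCounter when a push call runs past the 10-second limit named in the Help text. The pushWithMetric wrapper, the doPush callback, and the threshold constant are assumptions made for the example.

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// MsgLoneTimePushCounter mirrors the counter declared in the diff above.
var MsgLoneTimePushCounter = prometheus.NewCounter(prometheus.CounterOpts{
	Name: "msg_long_time_push_total",
	Help: "The number of messages with a push time exceeding 10 seconds",
})

// slowPushThreshold matches the 10-second limit described in the Help text.
const slowPushThreshold = 10 * time.Second

// pushWithMetric wraps a push call (doPush is a hypothetical stand-in for the
// real offline-push path) and counts it when it runs longer than the threshold.
func pushWithMetric(doPush func() error) error {
	start := time.Now()
	err := doPush()
	if time.Since(start) > slowPushThreshold {
		MsgLoneTimePushCounter.Inc()
	}
	return err
}

func main() {
	prometheus.MustRegister(MsgLoneTimePushCounter)

	// A push that completes quickly leaves the counter at zero.
	_ = pushWithMetric(func() error { return nil })
}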
@@ -47,9 +47,17 @@ func GetGrpcCusMetrics(registerName string, share *config.Share) []prometheus.Co
 	case share.RpcRegisterName.MessageGateway:
 		return []prometheus.Collector{OnlineUserGauge}
 	case share.RpcRegisterName.Msg:
-		return []prometheus.Collector{SingleChatMsgProcessSuccessCounter, SingleChatMsgProcessFailedCounter, GroupChatMsgProcessSuccessCounter, GroupChatMsgProcessFailedCounter}
+		return []prometheus.Collector{
+			SingleChatMsgProcessSuccessCounter,
+			SingleChatMsgProcessFailedCounter,
+			GroupChatMsgProcessSuccessCounter,
+			GroupChatMsgProcessFailedCounter,
+		}
 	case share.RpcRegisterName.Push:
-		return []prometheus.Collector{MsgOfflinePushFailedCounter}
+		return []prometheus.Collector{
+			MsgOfflinePushFailedCounter,
+			MsgLoneTimePushCounter,
+		}
 	case share.RpcRegisterName.Auth:
 		return []prometheus.Collector{UserLoginCounter}
 	case share.RpcRegisterName.User:
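The collector slice returned by GetGrpcCusMetrics is ultimately registered with a Prometheus registry and scraped over HTTP. Below is a minimal sketch of that wiring, assuming a plain prometheus.Registry and promhttp rather than the project's own metrics server; the two counters are local stand-ins for the Push-case collectors above and the port is arbitrary.

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Local stand-ins for the collectors the Push case returns above.
	msgOfflinePushFailedCounter := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "msg_offline_push_failed_total",
		Help: "The number of msg failed offline pushed",
	})
	msgLoneTimePushCounter := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "msg_long_time_push_total",
		Help: "The number of messages with a push time exceeding 10 seconds",
	})
	pushCollectors := []prometheus.Collector{
		msgOfflinePushFailedCounter,
		msgLoneTimePushCounter,
	}

	// Register everything the service reports, then expose it for scraping.
	reg := prometheus.NewRegistry()
	reg.MustRegister(pushCollectors...)
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	_ = http.ListenAndServe(":2112", nil) // arbitrary port for the example
}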
@@ -118,7 +118,7 @@ func getCache[T any](ctx context.Context, rcClient *rockscache.Client, key strin
 	v, err := rcClient.Fetch2(ctx, key, expire, func() (s string, err error) {
 		t, err = fn(ctx)
 		if err != nil {
-			log.ZError(ctx, "getCache query database failed", err, "key", key)
+			//log.ZError(ctx, "getCache query database failed", err, "key", key)
 			return "", err
 		}
 		bs, err := json.Marshal(t)
@@ -86,7 +86,7 @@ func (c *ConversationLocalCache) GetConversation(ctx context.Context, userID, co
 		if err == nil {
 			log.ZDebug(ctx, "ConversationLocalCache GetConversation return", "userID", userID, "conversationID", conversationID, "value", val)
 		} else {
-			log.ZError(ctx, "ConversationLocalCache GetConversation return", err, "userID", userID, "conversationID", conversationID)
+			log.ZWarn(ctx, "ConversationLocalCache GetConversation return", err, "userID", userID, "conversationID", conversationID)
 		}
 	}()
 	var cache cacheProto[pbconversation.Conversation]