We have a websocket service built on go-socket.io (a Go implementation of socket.io) and gorilla websocket, and it appears to have a memory leak: HeapAlloc keeps growing even when I force a release with debug.FreeOSMemory.
The service is simple. It authenticates each request with a JWT token and, on success, wraps the gorilla websocket conn in a go-socket.io conn. What confuses me is that net/textproto.(*Reader).ReadMIMEHeader (reached via net/http.readRequest inside net/http.(*conn).serve) appears to retain a large amount of memory, even though the net/http.(*conn).serve goroutine should return immediately once the request hijacks the websocket conn.
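For context, a minimal sketch of the kind of forced release mentioned above (not the service's actual code). debug.FreeOSMemory forces a garbage collection and then returns as much unused memory to the OS as possible, so a HeapAlloc that still climbs afterwards means live objects are being retained, not that garbage is merely waiting to be collected:

import (
    "runtime/debug"
    "time"
)

// Periodically force a full GC and return unused memory to the OS.
func freeOSMemoryLoop() {
    for range time.Tick(5 * time.Minute) {
        debug.FreeOSMemory()
    }
}

The gc/scvg lines below are standard GODEBUG=gctrace=1 output: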
gc 440 @51045.897s 0%: 0.034+4182+0.96 ms clock, 0.13+0/4182/12246+3.8 ms cpu, 4304->4309->4143 MB, 8266 MB goal, 4 P
scvg340: inuse: 4404, idle: 15, sys: 4419, released: 0, consumed: 4419 (MB)
GC forced
gc 441 @51170.096s 0%: 3.7+4355+1.4 ms clock, 14+2.9/4357/12795+5.8 ms cpu, 4317->4323->4158 MB, 8287 MB goal, 4 P
GC forced
gc 442 @51294.460s 0%: 0.034+3987+1.2 ms clock, 0.13+1.5/3987/11701+4.9 ms cpu, 4336->4341->4169 MB, 8316 MB goal, 4 P
scvg341: inuse: 4318, idle: 133, sys: 4451, released: 0, consumed: 4451 (MB)
GC forced
gc 443 @51418.451s 0%: 0.36+3925+0.99 ms clock, 1.4+4.0/3925/11554+3.9 ms cpu, 4350->4356->4182 MB, 8338 MB goal, 4 P
scvg342: inuse: 4363, idle: 103, sys: 4466, released: 0, consumed: 4466 (MB)
GC forced
gc 444 @51542.394s 0%: 0.042+3986+1.6 ms clock, 0.16+0/3981/11757+6.5 ms cpu, 4361->4367->4194 MB, 8365 MB goal, 4 P
scvg343: inuse: 4404, idle: 74, sys: 4478, released: 0, consumed: 4478 (MB)
GC forced
gc 445 @51666.384s 0%: 3.4+3987+1.4 ms clock, 13+2.5/3986/11747+5.7 ms cpu, 4375->4382->4208 MB, 8388 MB goal, 4 P
scvg344: inuse: 4454, idle: 39, sys: 4493, released: 0, consumed: 4493 (MB)
GC forced
gc 446 @51790.379s 0%: 0.055+4147+1.5 ms clock, 0.22+0/4139/12125+6.2 ms cpu, 4396->4402->4220 MB, 8416 MB goal, 4 P
scvg345: inuse: 4509, idle: 5, sys: 4514, released: 0, consumed: 4514 (MB)
GC forced
gc 447 @51914.542s 0%: 0.052+4205+2.1 ms clock, 0.21+1.5/4199/12348+8.5 ms cpu, 4413->4420->4234 MB, 8441 MB goal, 4 P
GC forced
gc 448 @52038.752s 0%: 2.7+4517+1.8 ms clock, 11+2.3/4517/13245+7.2 ms cpu, 4428->4436->4247 MB, 8469 MB goal, 4 P
scvg346: inuse: 4406, idle: 142, sys: 4548, released: 0, consumed: 4548 (MB)
GC forced
gc 449 @52163.276s 0%: 0.033+4206+1.3 ms clock, 0.13+0/4206/12306+5.3 ms cpu, 4442->4449->4259 MB, 8495 MB goal, 4 P
scvg347: inuse: 4452, idle: 109, sys: 4561, released: 0, consumed: 4561 (MB)
GC forced
gc 450 @52287.491s 0%: 0.044+4262+2.0 ms clock, 0.17+0/4261/12565+8.2 ms cpu, 4452->4459->4272 MB, 8519 MB goal, 4 P
scvg348: inuse: 4498, idle: 74, sys: 4572, released: 0, consumed: 4572 (MB)
GC forced
gc 451 @52411.769s 0%: 0.028+4012+2.0 ms clock, 0.11+0.066/3992/11762+8.0 ms cpu, 4471->4477->4285 MB, 8544 MB goal, 4 P
scvg349: inuse: 4550, idle: 40, sys: 4590, released: 0, consumed: 4590 (MB)
Note that on each forced cycle the live heap grows by roughly 12-15 MB (4143 -> 4158 -> 4169 -> ... MB) while the scavenger reports released: 0, so this is retained memory, not collection lag. Code sample:
func (c *CometServer) initHTTPServer() {
jwtMiddleware := jwtmiddleware.New(jwtmiddleware.Options{
SigningMethod: jwt.SigningMethodHS256,
ValidationKeyGetter: func(token *jwt.Token) (interface{}, error) {
return []byte(setting.JwtSecret), nil
},
// get the token from the querystring, cookie, or Authorization header, in that order
Extractor: jwtmiddleware.FromFirst(
jwtmiddleware.FromParameter(setting.JwtTokenQueryStringField),
TokenFromCookie(setting.JwtTokenCookieField),
jwtmiddleware.FromAuthHeader,
),
ErrorHandler: jwtErrorHandler,
})
r := mux.NewRouter()
// KeepContext stops mux from clearing the gorilla context after each request, so context.Clear must be called manually after every request
r.KeepContext = true
socketHandler := jwtMiddleware.Handler(c.socketio)
r.Handle("/socket.io/", socketHandler)
if setting.DEBUG {
r.PathPrefix("/debug/").Handler(http.DefaultServeMux)
r.PathPrefix("/").Handler(http.FileServer(http.Dir("./asset/")))
}
// n.UseHandler(r)
c.httpServer = &http.Server{
Addr: setting.HTTPListenAddr,
Handler: r,
}
}
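TokenFromCookie above is not part of go-jwt-middleware; it is a custom extractor. A plausible implementation is sketched below (the details are an assumption, not the service's actual code):

import (
    "net/http"

    jwtmiddleware "github.com/auth0/go-jwt-middleware"
)

// TokenFromCookie returns a jwtmiddleware.TokenExtractor that reads the
// JWT from the named cookie. A missing cookie is not an error: it yields
// an empty token so that FromFirst can try the next extractor.
func TokenFromCookie(name string) jwtmiddleware.TokenExtractor {
    return func(r *http.Request) (string, error) {
        cookie, err := r.Cookie(name)
        if err != nil {
            return "", nil
        }
        return cookie.Value, nil
    }
}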
// ....
// go-socket.io code
// ....
func (c *CometServer) initSocketIO() error {
    server, err := socketio.NewServer(transports)
    if err != nil {
        return err
    }
// ....
server.On("error", func(so socketio.Socket, err error) {
logger := SocketLogger(so)
logger.Error("socket connect error")
})
server.On(" connection",func(so socketio.Socket){
var (
uid string
exist bool
)
logger := SocketLogger(so)
claim := (context.Get(so.Request(), "user")).(*jwt.Token).Claims
// after getting the claims, clear the request context
context.Clear(so.Request())
var rawUID interface{}
if user, ok := claim.(jwt.MapClaims); ok {
if rawUID, ok = user[setting.JwtUserClaimField]; !ok {
logger.Error("invalid user claim")
so.Emit("disconnect", "invalid user claim")
}
} else {
logger.Errorf("invalid jwt claim %s", claim)
so.Emit("disconnect", "invalid user claim")
}
if uid, exist = rawUID.(string); exist {
// Multiple connections from the same user join the same room
so.Join(uid)
// room for broadcasting to all users
so.Join(Hourse)
c.users.Add(uid, 1)
logger.Debug("socket connected")
if setting.DEBUG {
so.Emit("debug", fmt.Sprintf("Your uid is %s, sid is %s", uid, so.Id()))
}
} else {
so.Emit("disconnect", "invalid user claim")
}
so.On("debug", func(data string) {
log.Debugf("debug data from client %s", data)
})
so.On("disconnection", func(data string) {
logger.Debugf("socket disconnected")
c.users.Add(uid, -1)
})
})
c.socketio = server
return nil
}
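For reference, the memory retained by net/textproto.(*Reader).ReadMIMEHeader was visible in heap profiles. Since the DEBUG branch of initHTTPServer mounts http.DefaultServeMux under /debug/, a blank import of net/http/pprof is enough to expose the profiling endpoints (how profiling was actually wired up here is an assumption):

import _ "net/http/pprof" // registers /debug/pprof/* handlers on http.DefaultServeMux

A heap profile can then be inspected with: go tool pprof http://<addr>/debug/pprof/heap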
Answer (score: 1)
Finally, I solved the problem.
I use https://github.com/auth0/go-jwt-middleware for JWT authentication. I also set the gorilla mux router's KeepContext to true, because I need to read the request's JWT token from the gorilla context. As soon as I have the JWT token, I clear the request context (via context.Clear(r)). So far, everything works.
But when go-jwt-middleware authentication fails, the request context is never cleared: mux's KeepContext is true, so mux does not clear it either, and every failed authentication leaks its context entries. That was the root cause.
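A minimal sketch of the kind of fix this implies: have the jwtErrorHandler registered in initHTTPServer clear the gorilla context, so the failure path cleans up just as the success path does. The exact response written here is an assumption; the context.Clear call is the point.

import (
    "net/http"

    "github.com/gorilla/context"
)

// jwtErrorHandler matches go-jwt-middleware's ErrorHandler signature.
// Clearing the request context here stops failed authentications from
// leaking context entries while mux's KeepContext is true.
func jwtErrorHandler(w http.ResponseWriter, r *http.Request, err string) {
    defer context.Clear(r) // always clean up, even when auth fails
    http.Error(w, "authentication failed", http.StatusUnauthorized)
}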