Reading the goim Source Code: Global Overview

Architecture Diagram

(architecture diagram: arch)

A brief explanation of this architecture:

1. logic starts an HTTP server that accepts HTTP requests, used to push data to Kafka, query online-user information, and handle websocket authentication (a minimal Kafka producer sketch follows this list)

2. The comet component starts the websocket/TCP servers, manages connections, and is responsible for pushing data to specific connections

3. The job component subscribes to messages on the configured Kafka topic and runs a consume loop, pushing the data it receives to the right connections on comet; it locates the comet instances through discovery

4. discovery is responsible for monitoring the liveness of the components above
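Step 1's "push data to Kafka" is, at its core, logic producing a serialized push message onto a topic that job later consumes. Below is a minimal sketch of that producer side using the Shopify/sarama dependency listed further down; the broker address, topic name, and string payload are placeholders, and the real logic code publishes a protobuf-encoded push message instead.

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	// SyncProducer requires Return.Successes to be enabled
	config.Producer.Return.Successes = true
	config.Producer.RequiredAcks = sarama.WaitForAll

	// Placeholder broker address and topic; goim reads these from its Kafka config
	producer, err := sarama.NewSyncProducer([]string{"127.0.0.1:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	msg := &sarama.ProducerMessage{
		Topic: "goim-push-topic",
		Key:   sarama.StringEncoder("room://1000"), // the key controls partitioning
		Value: sarama.ByteEncoder([]byte("hello goim")),
	}
	partition, offset, err := producer.SendMessage(msg)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("queued message at partition=%d offset=%d", partition, offset)
}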

Core Dependencies

	// config file parsing
	github.com/BurntSushi/toml v0.3.1
	// kafka client
	github.com/Shopify/sarama v1.19.0 // indirect
	// discovery (service registry) client
	github.com/bilibili/discovery v1.0.1
	// kafka consumer-group support
	github.com/bsm/sarama-cluster v2.1.15+incompatible
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/eapache/go-resiliency v1.1.0 // indirect
	github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect
	github.com/eapache/queue v1.1.0 // indirect
	// HTTP request handling
	github.com/gin-gonic/gin v1.3.0
	// protobuf serialization used by the gRPC services
	github.com/gogo/protobuf v1.1.1
	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
	github.com/golang/protobuf v1.2.0
	github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect
	// redis client
	github.com/gomodule/redigo v2.0.0+incompatible
	github.com/google/uuid v1.0.0
	github.com/issue9/assert v1.0.0
	github.com/pierrec/lz4 v2.0.5+incompatible // indirect
	github.com/pkg/errors v0.8.0
	github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a // indirect
	github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d // indirect
	github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a
	github.com/stretchr/testify v1.3.0
	github.com/thinkboy/log4go v0.0.0-20160303045050-f91a411e4a18
	github.com/ugorji/go/codec v0.0.0-20190204201341-e444a5086c43
	github.com/zhenjl/cityhash v0.0.0-20131128155616-cdd6a94144ab
	golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1
	// gRPC for inter-component RPC calls
	google.golang.org/grpc v1.16.0
	// kafka client (versioned import path)
	gopkg.in/Shopify/sarama.v1 v1.19.0
	gopkg.in/yaml.v2 v2.2.2 // indirect

The comments above mark the libraries that matter most; the grpc, redis, and kafka usage in particular is key to understanding the goim framework.

Component-by-Component Code Analysis

The implementation of the discovery component is not covered here for now.

1. comet handles websocket/TCP connections (cmd/comet/main.go shows it starting both the TCP and websocket listeners)

func main() {
	flag.Parse()
	// initialize configuration
	if err := conf.Init(); err != nil {
		panic(err)
	}
	rand.Seed(time.Now().UTC().UnixNano())
	runtime.GOMAXPROCS(runtime.NumCPU())
	log.Infof("goim-comet [version: %s env: %+v] start", ver, conf.Conf.Env)
	// register discovery
	dis := naming.New(conf.Conf.Discovery)
	resolver.Register(dis)
	// new comet server
	srv := comet.NewServer(conf.Conf)
	if err := comet.InitWhitelist(conf.Conf.Whitelist); err != nil {
		panic(err)
	}
	// initialize the TCP server
	if err := comet.InitTCP(srv, conf.Conf.TCP.Bind, runtime.NumCPU()); err != nil {
		panic(err)
	}
	// initialize the websocket server
	if err := comet.InitWebsocket(srv, conf.Conf.Websocket.Bind, runtime.NumCPU()); err != nil {
		panic(err)
	}
	// also start a websocket server over TLS if enabled
	if conf.Conf.Websocket.TLSOpen {
		if err := comet.InitWebsocketWithTLS(srv, conf.Conf.Websocket.TLSBind, conf.Conf.Websocket.CertFile, conf.Conf.Websocket.PrivateFile, runtime.NumCPU()); err != nil {
			panic(err)
		}
	}
	// new grpc server
	rpcSrv := grpc.New(conf.Conf.RPCServer, srv)
	// the remaining code is omitted here
}
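Note that InitTCP and InitWebsocket are both handed runtime.NumCPU(), which is used as the number of accept goroutines per listener. The sketch below shows that "one listener, N accept goroutines" pattern; the bind address is a placeholder and handleConn is a hypothetical stand-in for comet's real handshake and dispatch loops, not goim's actual code.

package main

import (
	"log"
	"net"
	"runtime"
)

// initTCP sketches the "one listener, N accept goroutines" pattern.
func initTCP(bind string, accept int) error {
	addr, err := net.ResolveTCPAddr("tcp", bind)
	if err != nil {
		return err
	}
	lis, err := net.ListenTCP("tcp", addr)
	if err != nil {
		return err
	}
	for i := 0; i < accept; i++ {
		go func() {
			for {
				conn, err := lis.AcceptTCP()
				if err != nil {
					log.Printf("accept: %v", err)
					continue
				}
				// comet would authenticate the connection against logic here,
				// then run separate read/write loops for it
				go handleConn(conn)
			}
		}()
	}
	return nil
}

func handleConn(conn *net.TCPConn) {
	defer conn.Close()
	// placeholder for the per-connection protocol handling
}

func main() {
	if err := initTCP(":3101", runtime.NumCPU()); err != nil {
		log.Fatal(err)
	}
	select {} // block until killed, as comet does until it receives a shutdown signal
}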

2. logic handles HTTP requests (it starts an HTTP server plus an RPC server for other components to call)

cmd/logic/main.go

func main() {
	flag.Parse()
	if err := conf.Init(); err != nil {
		panic(err)
	}
	log.Infof("goim-logic [version: %s env: %+v] start", ver, conf.Conf.Env)
	// grpc register naming
	dis := naming.New(conf.Conf.Discovery)
	resolver.Register(dis)
	// logic
	srv := logic.New(conf.Conf)
	// start the HTTP server that handles requests from clients
	httpSrv := http.New(conf.Conf.HTTPServer, srv)
	// start the gRPC server that handles RPC calls from other components
	rpcSrv := grpc.New(conf.Conf.RPCServer, srv)
	...
}

 internal/logic/http/server.go

func New(c *conf.HTTPServer, l *logic.Logic) *Server {
	engine := gin.New()
	engine.Use(loggerHandler, recoverHandler)
	go func() {
		if err := engine.Run(c.Addr); err != nil {
			panic(err)
		}
	}()
	s := &Server{
		engine: engine,
		logic:  l,
	}
	// initialize the routes (the request URIs used in the test examples are mapped here)
	s.initRouter()
	return s
}
...
// initialize the HTTP routes
func (s *Server) initRouter() {
	group := s.engine.Group("/goim")
	group.POST("/push/keys", s.pushKeys)
	group.POST("/push/mids", s.pushMids)
	group.POST("/push/room", s.pushRoom)
	group.POST("/push/all", s.pushAll)
	group.GET("/online/top", s.onlineTop)
	group.GET("/online/room", s.onlineRoom)
	group.GET("/online/total", s.onlineTotal)
	group.GET("/nodes/weighted", s.nodesWeighted)
	group.GET("/nodes/instances", s.nodesInstances)
}
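As a usage example, pushing a broadcast through logic is just an HTTP POST against one of these routes. In the sketch below the listen address, the operation query parameter, and the JSON body are assumptions for illustration only; check internal/logic/http for the exact parameters each handler expects.

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"strings"
)

func main() {
	// Assumed address and parameters; the real handler signatures live in internal/logic/http
	url := "http://127.0.0.1:3111/goim/push/all?operation=1000"
	resp, err := http.Post(url, "application/json", strings.NewReader(`{"text":"hello goim"}`))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}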

 

internal/logic/grpc/server.go

func New(c *conf.RPCServer, l *logic.Logic) *grpc.Server {
	keepParams := grpc.KeepaliveParams(keepalive.ServerParameters{
		MaxConnectionIdle:     time.Duration(c.IdleTimeout),
		MaxConnectionAgeGrace: time.Duration(c.ForceCloseWait),
		Time:                  time.Duration(c.KeepAliveInterval),
		Timeout:               time.Duration(c.KeepAliveTimeout),
		MaxConnectionAge:      time.Duration(c.MaxLifeTime),
	})
	// create the gRPC server with the keepalive options
	srv := grpc.NewServer(keepParams)
	// register the Logic service implementation (this is where the RPC methods get wired up)
	pb.RegisterLogicServer(srv, &server{l})
	lis, err := net.Listen(c.Network, c.Addr)
	if err != nil {
		panic(err)
	}
	go func() {
		if err := srv.Serve(lis); err != nil {
			panic(err)
		}
	}()
	return srv
}
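For completeness, here is the other side of this RPC: comet and job obtain a Logic client by dialing this server, normally through the discovery resolver registered in main. The sketch below shows such a dial with client-side keepalive; the fixed address, the insecure transport, and the keepalive values are assumptions used only to keep the example short.

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// goim actually dials a discovery:// target resolved via the registered resolver;
	// a fixed address and insecure transport are used here only for illustration
	conn, err := grpc.Dial("127.0.0.1:3119",
		grpc.WithInsecure(),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                10 * time.Second, // ping the server when the connection is idle this long
			Timeout:             3 * time.Second,  // give up if the ping is not acknowledged in time
			PermitWithoutStream: true,
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	// pb.NewLogicClient(conn) would then be used to call the Logic service methods
}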

3. The job component (creates the Kafka subscriber and watches the comet instances)

func main() {
	flag.Parse()
	if err := conf.Init(); err != nil {
		panic(err)
	}
	log.Infof("goim-job [version: %s env: %+v] start", ver, conf.Conf.Env)
	// grpc register naming
	dis := naming.New(conf.Conf.Discovery)
	resolver.Register(dis)
	// job
	j := job.New(conf.Conf)
	go j.Consume()
	...
}
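j.Consume() is where the messages published by logic come back out of Kafka. Below is a minimal sketch of such a consume loop built on the bsm/sarama-cluster dependency from the list above; broker, group, and topic names are placeholders, and forwarding to comet is reduced to a log line rather than goim's real gRPC push.

package main

import (
	"log"

	cluster "github.com/bsm/sarama-cluster"
)

func main() {
	config := cluster.NewConfig()
	config.Consumer.Return.Errors = true

	// Placeholder broker/group/topic values; goim reads these from conf.Kafka
	consumer, err := cluster.NewConsumer([]string{"127.0.0.1:9092"}, "goim-job", []string{"goim-push-topic"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	go func() {
		for err := range consumer.Errors() {
			log.Printf("consumer error: %v", err)
		}
	}()

	for msg := range consumer.Messages() {
		// the real job decodes a push message here and forwards it
		// to the matching comet instance over gRPC
		log.Printf("topic=%s partition=%d offset=%d value=%s", msg.Topic, msg.Partition, msg.Offset, msg.Value)
		consumer.MarkOffset(msg, "")
	}
}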

internal/job/job.go, the concrete implementation

func New(c *conf.Config) *Job {
	j := &Job{
		c: c,
		// subscribe to the Kafka topic that logic publishes to
		consumer: newKafkaSub(c.Kafka),
		rooms:    make(map[string]*Room),
	}
	// watch comet instances via discovery so pushes can be routed to them
	j.watchComet(c.Discovery)
	return j
}

 
