1.配置
{
"servers": [
{
"default_log_level": "DEBUG",
"default_log_path": "stdout",
"routers": [
{
"router_config_name": "server_router",
"virtual_hosts": [
{
"name": "serverHost",
"domains": [
"*"
],
"routers": [
{
"match": {
"prefix": "/"
},
"route": {
"cluster_name": "serverCluster"
}
}
]
}
]
},
{
"router_config_name": "client_router",
"virtual_hosts": [
{
"name": "clientHost",
"domains": [
"*"
],
"routers": [
{
"match": {
"prefix": "/test1"
},
"route": {
"cluster_name": "clientCluster1"
}
},
{
"match": {
"prefix": "/test2"
},
"route": {
"cluster_name": "clientCluster2"
}
}
]
}
]
}
],
"listeners": [
{
"name": "serverListener",
"address": "127.0.0.1:2046",
"bind_port": true,
"filter_chains": [
{
"filters": [
{
"type": "proxy",
"config": {
"downstream_protocol": "Http1",
"upstream_protocol": "Http1",
"router_config_name": "server_router"
}
}
]
}
]
},
{
"name": "clientListener",
"address": "127.0.0.1:2045",
"bind_port": true,
"filter_chains": [
{
"filters": [
{
"type": "proxy",
"config": {
"downstream_protocol": "Http1",
"upstream_protocol": "Http1",
"router_config_name": "client_router"
}
}
]
}
]
}
]
}
],
"cluster_manager": {
"clusters": [
{
"name": "serverCluster",
"type": "SIMPLE",
"lb_type": "LB_RANDOM",
"max_request_per_conn": 1024,
"conn_buffer_limit_bytes": 32768,
"hosts": [
{
"address": "127.0.0.1:8080"
}
]
},
{
"name": "clientCluster1",
"health_check": {
"protocol": "Http1",
"timeout": "10s",
"interval": "1m",
"interval_jitter": "1m",
"healthy_threshold": 1,
"service_name": "test"
},
"type": "SIMPLE",
"lb_type": "LB_RANDOM",
"max_request_per_conn": 1024,
"conn_buffer_limit_bytes": 32768,
"hosts": [
{
"address": "127.0.0.1:8080"
}
]
},
{
"name": "clientCluster2",
"type": "SIMPLE",
"lb_type": "LB_RANDOM",
"max_request_per_conn": 1024,
"conn_buffer_limit_bytes": 32768,
"hosts": [
{
"address": "127.0.0.1:8081"
}
]
}
]
},
"admin": {
"address": {
"socket_address": {
"address": "0.0.0.0",
"port_value": 34901
}
}
}
}
请求 127.0.0.1:2045/test2 会被路由到 client_router 这个 router。这个 router 的配置是
{ "match": { "prefix": "/test1" }, "route": { "cluster_name": "clientCluster1" } }, { "match": { "prefix": "/test2" }, "route": { "cluster_name": "clientCluster2" } }
2.每个连接都会创建一个proxy,如下图所示.
3.这个proxy保存了要路由到哪个router的信息.
func NewProxy(ctx context.Context, config *v2.Proxy) Proxy { proxy := &proxy{ config: config, //这个config参数包含 router名称,这里肯定是client_router. clusterManager: cluster.GetClusterMngAdapterInstance().ClusterManager, //这个是保存cluster信息 activeStreams: list.New(), //多路复用的,因为每个链接都只有一个proxy. stats: globalStats, context: ctx, accessLogs: mosnctx.Get(ctx, types.ContextKeyAccessLogs).([]api.AccessLog), } //这里根据router名 获取具体的router配置.并包装了一下 if routersWrapper := router.GetRoutersMangerInstance().GetRouterWrapperByName(proxy.config.RouterConfigName); routersWrapper != nil { proxy.routersWrapper = routersWrapper } else { log.DefaultLogger.Alertf("proxy.config", "[proxy] RouterConfigName:%s doesn't exit", proxy.config.RouterConfigName) } proxy.downstreamListener = &downstreamCallbacks{ proxy: proxy, } //返回 return proxy }
4.接收到数据之后,就开始进行路由/重试等处理.可以看前几篇,不多赘述.
func (s *downStream) OnReceive(ctx context.Context, headers types.HeaderMap, data types.IoBuffer, trailers types.HeaderMap) { fmt.Println("[downstream.go===downstream.OnReceive()]接受到请求数据后,开始处理这个请求") s.downstreamReqHeaders = headers s.context = mosnctx.WithValue(s.context, types.ContextKeyDownStreamHeaders, headers) s.downstreamReqDataBuf = data s.downstreamReqTrailers = trailers if log.Proxy.GetLogLevel() >= log.DEBUG { log.Proxy.Debugf(s.context, "[proxy] [downstream] OnReceive headers:%+v, data:%+v, trailers:%+v", headers, data, trailers) } id := s.ID fmt.Println("[downstream.go===pool.ScheduleAuto()]把任务扔到处理池") // goroutine for proxy pool.ScheduleAuto(func() { defer func() { if r := recover(); r != nil { log.Proxy.Errorf(s.context, "[proxy] [downstream] OnReceive panic: %v, downstream: %+v, oldId: %d, newId: %d\n%s", r, s, id, s.ID, string(debug.Stack())) if id == s.ID { s.cleanStream() } } }() fmt.Println("[downstream.go===pool.ScheduleAuto()]任务具体怎么处理呢?1.应该是最多重试10次") phase := types.InitPhase for i := 0; i < 10; i++ { s.cleanNotify() fmt.Println("[downstream.go===pool.ScheduleAuto()]核心处理,s.receive(id,phase)") //这个里面按照流程,有一堆的if phase = s.receive(ctx, id, phase) switch phase { case types.End: return case types.MatchRoute: log.Proxy.Debugf(s.context, "[proxy] [downstream] redo match route %+v", s) case types.Retry: log.Proxy.Debugf(s.context, "[proxy] [downstream] retry %+v", s) case types.UpFilter: log.Proxy.Debugf(s.context, "[proxy] [downstream] directResponse %+v", s) } } }) }
主要看
s.matchRoute()
//核心逻辑 func (s *downStream) matchRoute() { headers := s.downstreamReqHeaders //1.拿出刚刚塞进去的router routers := s.proxy.routersWrapper.GetRouters() //=====2.之前一直在找哪里去根据header进行匹配了..原来在这里======= // 2.do handler chain handlerChain := router.CallMakeHandlerChain(s.context, headers, routers, s.proxy.clusterManager) // handlerChain should never be nil if handlerChain == nil { log.Proxy.Alertf(s.context, types.ErrorKeyRouteMatch, "no route to make handler chain, headers = %v", headers) s.requestInfo.SetResponseFlag(api.NoRouteFound) s.sendHijackReply(types.RouterUnavailableCode, headers) return } //这里应该是重试的逻辑吧,轮询下一个 s.snapshot, s.route = handlerChain.DoNextHandler() } //2.1 到这里 func CallMakeHandlerChain(ctx context.Context, headers api.HeaderMap, routers types.Routers, clusterManager types.ClusterManager) *RouteHandlerChain { return makeHandlerChainOrder.makeHandlerChain(ctx, headers, routers, clusterManager) }
//2.2 到这里,根据header来找具体的cluster func DefaultMakeHandlerChain(ctx context.Context, headers api.HeaderMap, routers types.Routers, clusterManager types.ClusterManager) *RouteHandlerChain { var handlers []types.RouteHandler if r := routers.MatchRoute(headers, 1); r != nil { if log.Proxy.GetLogLevel() >= log.DEBUG { log.Proxy.Debugf(ctx, RouterLogFormat, "DefaultHandklerChain", "MatchRoute", fmt.Sprintf("matched a route: %v", r)) } handlers = append(handlers, &simpleHandler{route: r}) } return NewRouteHandlerChain(ctx, clusterManager, handlers) }
到此,路由就结束了.