1. Istio Mesh Feature Cases
1.1 Simple Feature Case V1
1.1.1 Case Diagram
- Two applications
  - frontend (proxy): the front-end application; it requests the back-end demoapp
    - service: proxy
  - demoapp: the back-end application
    - service: demoappv10
1.1.2 Case Experiment
1. Deploy the demoappv10 version
root@native01:~/istio/istio-demo# kubectl create deployment demoappv10 --image=registry.cn-wulanchabu.aliyuncs.com/daizhe/demoapp:v1.0 --replicas=3 --dry-run=client -o yaml > deploy-demoapp-v10.yaml
root@native01:~/istio/istio-demo# cat deploy-demoapp-v10.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: demoapp
  name: demoappv10
spec:
  replicas: 3
  selector:
    matchLabels:
      app: demoapp
      version: v1.0
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: demoapp
        version: v1.0
    spec:
      containers:
      - image: registry.cn-wulanchabu.aliyuncs.com/daizhe/demoapp:v1.0
        name: demoapp
        env:
        - name: PORT
          value: "8080"
        resources: {}
status: {}
# To be discovered by Istio and consumed as a service inside the mesh, a Service resource must be created
root@native01:~/istio/istio-demo# kubectl create service clusterip demoappv10 --tcp=8080:8080 --dry-run=client -o yaml > service-demoapp-v10.yaml
root@native01:~/istio/istio-demo# cat service-demoapp-v10.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: demoapp
  name: demoappv10
spec:
  ports:
  - name: http-8080
    port: 8080
    protocol: TCP
    targetPort: 8080
  selector:
    app: demoapp
    version: v1.0
  type: ClusterIP
# Apply the manifests
root@native01:~/istio/istio-demo# kubectl apply -f .
deployment.apps/demoappv10 created
service/demoappv10 unchanged
root@native01:~/istio/istio-demo# kubectl get pods
NAME READY STATUS RESTARTS AGE
demoappv10-65cdf575c8-cqx5b 2/2 Running 0 25s
demoappv10-65cdf575c8-fc7cv 2/2 Running 0 25s
demoappv10-65cdf575c8-nwftc 2/2 Running 0 25s
root@native01:~/istio/istio-demo# DEMOAPP=$(kubectl get pods -l app=demoapp -o jsonpath={.items[0].metadata.name})
root@native01:~/istio/istio-demo# echo $DEMOAPP
demoappv10-5c497c6f7c-24dk4
# Check whether the istio-proxy configuration has synced
root@native01:~/istio/istio-demo# istioctl proxy-status
NAME CDS LDS EDS RDS ISTIOD VERSION
demoappv10-5c497c6f7c-24dk4.default SYNCED SYNCED SYNCED SYNCED istiod-76d66d9876-lqgph 1.12.1
demoappv10-5c497c6f7c-fdwf4.default SYNCED SYNCED SYNCED SYNCED istiod-76d66d9876-lqgph 1.12.1
demoappv10-5c497c6f7c-ks5hk.default SYNCED SYNCED SYNCED SYNCED istiod-76d66d9876-lqgph 1.12.1
# Inspect the listeners
root@native01:~/istio/istio-demo# istioctl proxy-config listeners $DEMOAPP --port=8080
ADDRESS PORT MATCH DESTINATION
0.0.0.0 8080 Trans: raw_buffer; App: HTTP Route: 8080
0.0.0.0 8080 ALL PassthroughCluster
# Inspect the routes
root@native01:~/istio/istio-demo# istioctl proxy-config routes $DEMOAPP | grep "demoappv10"
8080 demoappv10, demoappv10.default + 1 more... /*
# Inspect the clusters
root@native01:~/istio/istio-demo# istioctl proxy-config clusters $DEMOAPP | grep "demoappv10"
demoappv10.default.svc.cluster.local 8080 - outbound EDS
# Inspect the backend endpoints
root@native01:~/istio/istio-demo# istioctl proxy-config endpoints $DEMOAPP | grep "demoappv10"
10.220.104.135:8080 HEALTHY OK outbound|8080||demoappv10.default.svc.cluster.local
10.220.104.139:8080 HEALTHY OK outbound|8080||demoappv10.default.svc.cluster.local
10.220.104.140:8080 HEALTHY OK outbound|8080||demoappv10.default.svc.cluster.local
2. Access test (start a client inside the mesh)
root@native01:~/istio/istio-demo# kubectl run client --image=registry.cn-wulanchabu.aliyuncs.com/daizhe/admin-box -it --rm --restart=Never --command -- /bin/sh
If you don't see a command prompt, try pressing enter.
# The client pod also has a sidecar injected, so its requests to demoappv10 are intercepted by the outbound (egress) listener and then scheduled by it;
root@client # curl demoappv10:8080
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.6, ServerName: demoappv10-5c497c6f7c-ks5hk, ServerIP: 10.220.104.151!
root@client # curl demoappv10:8080
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.6, ServerName: demoappv10-5c497c6f7c-fdwf4, ServerIP: 10.220.104.147!
root@client # curl demoappv10:8080
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.6, ServerName: demoappv10-5c497c6f7c-24dk4, ServerIP: 10.220.104.143!
root@client # while true;do curl demoappv10:8080; sleep 0.$RANDOM; done
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.6, ServerName: demoappv10-5c497c6f7c-ks5hk, ServerIP: 10.220.104.151!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.6, ServerName: demoappv10-5c497c6f7c-24dk4, ServerIP: 10.220.104.143!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.6, ServerName: demoappv10-5c497c6f7c-fdwf4, ServerIP: 10.220.104.147!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.6, ServerName: demoappv10-5c497c6f7c-fdwf4, ServerIP: 10.220.104.147!
iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.6, ServerName: demoappv10-5c497c6f7c-ks5hk, ServerIP: 10.220.104.151!
- The Kiali graph renders the service topology in real time based on traffic
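To confirm the egress-listener interception described above, the client sidecar's outbound configuration can be dumped while the client pod is still running (a quick sketch; `client` is the pod name from the kubectl run command above):
# istioctl proxy-config listeners client --port 8080
# istioctl proxy-config endpoints client | grep demoappv10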
3. Create the front-end proxy application (frontend proxy)
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/01-demoapp-v10# cat deploy-proxy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: proxy
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  selector:
    matchLabels:
      app: proxy
  template:
    metadata:
      labels:
        app: proxy
    spec:
      containers:
      - env:
        - name: PROXYURL
          value: http://demoappv10:8080   # request demoappv10
        image: registry.cn-wulanchabu.aliyuncs.com/daizhe/proxy:v0.1.1
        imagePullPolicy: IfNotPresent
        name: proxy
        ports:
        - containerPort: 8080
          name: web
          protocol: TCP
        resources:
          limits:
            cpu: 50m
---
apiVersion: v1
kind: Service
metadata:
  name: proxy
spec:
  ports:
  - name: http-80
    port: 80
    protocol: TCP
    targetPort: 8080
  selector:
    app: proxy
---
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/01-demoapp-v10# kubectl apply -f deploy-proxy.yaml
deployment.apps/proxy created
service/proxy created
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/01-demoapp-v10# kubectl get pods
NAME READY STATUS RESTARTS AGE
demoappv10-5c497c6f7c-24dk4 2/2 Running 2 18h
demoappv10-5c497c6f7c-fdwf4 2/2 Running 2 18h
demoappv10-5c497c6f7c-ks5hk 2/2 Running 2 18h
proxy-5cf6d4cc8d-2kjm8 2/2 Running 0 9m27s
4. Access the frontend proxy from the client (note: it is the egress listener that actually performs mesh traffic scheduling)
# Traffic path when accessing the frontend proxy: client pod -> sidecar Envoy (egress listener, proxy:80) -> (ingress listener) proxy pod -> (egress listener) demoappv10:8080 -> (ingress listener) demoappv10 pod
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/01-demoapp-v10# kubectl run client --image=ikubernetes/admin-box -it --rm --restart=Never --command -- /bin/sh
If you don't see a command prompt, try pressing enter.
root@client # curl localhost:15000/listeners
d6eb71c4-035b-490a-9f5f-47b067ced679::0.0.0.0:15090
20f06ba5-7ac6-4034-b377-cfeb05b72820::0.0.0.0:15021
10.100.4.211_15012::10.100.4.211:15012
10.100.14.128_15443::10.100.14.128:15443
10.100.27.71_443::10.100.27.71:443
10.100.4.211_443::10.100.4.211:443
10.100.14.128_443::10.100.14.128:443
10.100.14.128_31400::10.100.14.128:31400
10.100.0.1_443::10.100.0.1:443
10.100.0.2_53::10.100.0.2:53
10.100.9.98_14268::10.100.9.98:14268
10.100.9.98_14250::10.100.9.98:14250
10.100.43.152_8000::10.100.43.152:8000
0.0.0.0_15010::0.0.0.0:15010
0.0.0.0_20001::0.0.0.0:20001
10.100.188.107_3000::10.100.188.107:3000
0.0.0.0_9090::0.0.0.0:9090
0.0.0.0_8080::0.0.0.0:8080
10.100.0.2_9153::10.100.0.2:9153
0.0.0.0_9411::0.0.0.0:9411
0.0.0.0_16685::0.0.0.0:16685
0.0.0.0_15014::0.0.0.0:15014
0.0.0.0_80::0.0.0.0:80
10.100.68.97_443::10.100.68.97:443
10.100.14.128_15021::10.100.14.128:15021
virtualOutbound::0.0.0.0:15001
virtualInbound::0.0.0.0:15006
root@client # curl localhost:15000/clusters
root@client # curl proxy
# The response comes from the back-end demoappv10 service
Proxying value: iKubernetes demoapp v1.0 !! ClientIP: 127.0.0.6, ServerName: demoappv10-5c497c6f7c-24dk4, ServerIP: 10.220.104.143!
- Took 314 milliseconds.
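Each hop in the path above can be verified with istioctl (a sketch; $DEMOAPP was captured in step 1, and the proxy pod name is looked up inline):
# the client's egress route for proxy:80
# istioctl proxy-config routes client --name 80 | grep proxy
# the proxy pod's egress route for demoappv10:8080
# PROXYPOD=$(kubectl get pods -l app=proxy -o jsonpath={.items[0].metadata.name})
# istioctl proxy-config routes $PROXYPOD --name 8080 | grep demoappv10
# the demoapp pod's inbound listener (virtualInbound on 15006)
# istioctl proxy-config listeners $DEMOAPP --port 15006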
1.2 Simple Feature Case V2
1.2.1 Case Diagram
- Two applications
  - frontend (proxy): the front-end application; it requests the back-end demoapp;
  - demoapp: the back-end application, deployed in two versions at the same time;
1.2.2 Case Experiment
- Define a VirtualService to supplement and upgrade the default routing configuration;
1. Deploy demoappv11
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/02-demoapp-v11# cat deploy-demoapp-v11.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: demoappv11
    version: v1.1
  name: demoappv11
spec:
  progressDeadlineSeconds: 600
  replicas: 2
  selector:
    matchLabels:
      app: demoapp
      version: v1.1
  template:
    metadata:
      labels:
        app: demoapp
        version: v1.1
    spec:
      containers:
      - image: registry.cn-wulanchabu.aliyuncs.com/daizhe/demoapp:v1.1
        imagePullPolicy: IfNotPresent
        name: demoapp
        env:
        - name: "PORT"
          value: "8080"
        ports:
        - containerPort: 8080
          name: web
          protocol: TCP
        resources:
          limits:
            cpu: 50m
---
apiVersion: v1
kind: Service
metadata:
  name: demoappv11
spec:
  ports:
  - name: http-8080
    port: 8080
    protocol: TCP
    targetPort: 8080
  selector:
    app: demoapp
    version: v1.1
  type: ClusterIP
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/02-demoapp-v11# kubectl apply -f deploy-demoapp-v11.yaml
deployment.apps/demoappv11 created
service/demoappv11 created
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/02-demoapp-v11# kubectl get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
demoappv10 ClusterIP 10.100.67.168 <none> 8080/TCP 18h
demoappv11 ClusterIP 10.100.72.0 <none> 8080/TCP 17s
kubernetes ClusterIP 10.100.0.1 <none> 443/TCP 4d17h
proxy ClusterIP 10.100.131.98 <none> 80/TCP 57m
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/02-demoapp-v11# kubectl get pods
NAME READY STATUS RESTARTS AGE
demoappv10-5c497c6f7c-24dk4 2/2 Running 2 18h
demoappv10-5c497c6f7c-fdwf4 2/2 Running 2 18h
demoappv10-5c497c6f7c-ks5hk 2/2 Running 2 18h
demoappv11-7984f579f5-9bzmv 2/2 Running 0 39s
demoappv11-7984f579f5-qsw5z 2/2 Running 0 39s
proxy-5cf6d4cc8d-2kjm8 2/2 Running 0 57m
# At this point the corresponding listener, cluster, route, and endpoint resources are generated on every sidecar proxy in the mesh
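A quick check that the new resources actually reached the sidecars (a sketch reusing the $DEMOAPP variable captured earlier):
# istioctl proxy-config clusters $DEMOAPP | grep demoappv11
# istioctl proxy-config endpoints $DEMOAPP | grep demoappv11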
2. Repoint the frontend proxy at the demoapp Service
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/02-demoapp-v11# cat deploy-proxy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: proxy
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  selector:
    matchLabels:
      app: proxy
  template:
    metadata:
      labels:
        app: proxy
    spec:
      containers:
      - env:
        - name: PROXYURL
          value: http://demoapp:8080   # request the demoapp Service
        image: registry.cn-wulanchabu.aliyuncs.com/daizhe/proxy:v0.1.1
        imagePullPolicy: IfNotPresent
        name: proxy
        ports:
        - containerPort: 8080
          name: web
          protocol: TCP
        resources:
          limits:
            cpu: 50m
---
apiVersion: v1
kind: Service
metadata:
  name: proxy
spec:
  ports:
  - name: http-80
    port: 80
    protocol: TCP
    targetPort: 8080
  selector:
    app: proxy
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/02-demoapp-v11# kubectl apply -f deploy-proxy.yaml
deployment.apps/proxy configured
service/proxy unchanged
3. Create the demoapp Service
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/02-demoapp-v11# cat service-demoapp.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: demoapp
spec:
  ports:
  - name: http
    port: 8080
    protocol: TCP
    targetPort: 8080
  selector:   # selects the label shared by both demoappv10 and demoappv11 Pods
    app: demoapp
  type: ClusterIP
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/02-demoapp-v11# kubectl apply -f service-demoapp.yaml
service/demoapp created
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/02-demoapp-v11# kubectl get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
demoapp ClusterIP 10.100.7.143 <none> 8080/TCP 9s
demoappv10 ClusterIP 10.100.67.168 <none> 8080/TCP 18h
demoappv11 ClusterIP 10.100.72.0 <none> 8080/TCP 5m20s
kubernetes ClusterIP 10.100.0.1 <none> 443/TCP 4d17h
proxy ClusterIP 10.100.131.98 <none> 80/TCP 62m
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/02-demoapp-v11# kubectl get endpoints
NAME ENDPOINTS AGE
demoapp 10.220.104.143:8080,10.220.104.147:8080,10.220.104.151:8080 + 2 more... 24s
demoappv10 10.220.104.143:8080,10.220.104.147:8080,10.220.104.151:8080 18h
demoappv11 10.220.104.173:8080,10.220.104.175:8080 5m35s
kubernetes 172.19.107.218:6443 4d17h
proxy 10.220.104.174:8080 62m
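A label check makes it obvious why demoapp selects all five Pods while the per-version Services select only their own (a sketch using only the labels defined above):
# kubectl get pods -l app=demoapp --show-labels
# kubectl get pods -l app=demoapp,version=v1.1 -o name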
4. Access the frontend proxy from the client (note: it is the egress listener that actually performs mesh traffic scheduling)
# Right now, client requests through the frontend proxy rotate across the backend clusters of both versions;
root@native01:~# kubectl run client --image=ikubernetes/admin-box -it --rm --restart=Never --command -- /bin/sh
If you don't see a command prompt, try pressing enter.
root@client # while true;do curl proxy/hostname; sleep 0.$RANDOM; done
Proxying value: ServerName: demoappv11-7984f579f5-qsw5z
- Took 254 milliseconds.
Proxying value: ServerName: demoappv10-5c497c6f7c-24dk4
- Took 24 milliseconds.
Proxying value: ServerName: demoappv10-5c497c6f7c-fdwf4
- Took 9 milliseconds.
Proxying value: ServerName: demoappv10-5c497c6f7c-ks5hk
- Took 7 milliseconds.
Proxying value: ServerName: demoappv10-5c497c6f7c-24dk4
- Took 18 milliseconds.
Proxying value: ServerName: demoappv10-5c497c6f7c-fdwf4
- Took 8 milliseconds.
Proxying value: ServerName: demoappv11-7984f579f5-9bzmv
- Took 29 milliseconds.
Proxying value: ServerName: demoappv10-5c497c6f7c-ks5hk
- Took 14 milliseconds.
5. Inspect the default route configuration of the demoapp virtual host
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/02-demoapp-v11# DEMOAPP=$(kubectl get pods -l app=demoapp -o jsonpath={.items[0].metadata.name})
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/02-demoapp-v11# echo $DEMOAPP
demoappv10-5c497c6f7c-24dk4
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/02-demoapp-v11# istioctl proxy-config routes $DEMOAPP
# By default no VIRTUAL SERVICE routing rules exist
NAME DOMAINS MATCH VIRTUAL SERVICE
InboundPassthroughClusterIpv4 * /*
jaeger-collector.istio-system.svc.cluster.local:14268 * /*
inbound|8080|| * /*
grafana.istio-system.svc.cluster.local:3000 * /*
kube-dns.kube-system.svc.cluster.local:9153 * /*
* /healthz/ready*
jaeger-collector.istio-system.svc.cluster.local:14250 * /*
kubernetes-dashboard.kubernetes-dashboard.svc.cluster.local:443 * /*
istio-ingressgateway.istio-system.svc.cluster.local:15021 * /*
80 istio-egressgateway.istio-system, 10.100.27.71 /*
80 istio-ingressgateway.istio-system, 10.100.14.128 /*
80 proxy, proxy.default + 1 more... /*
80 tracing.istio-system, 10.100.137.187 /*
9090 kiali.istio-system, 10.100.134.85 /*
9090 prometheus.istio-system, 10.100.40.193 /*
InboundPassthroughClusterIpv4 * /*
8080 demoapp, demoapp.default + 1 more... /*
8080 demoappv10, demoappv10.default + 1 more... /*
8080 demoappv11, demoappv11.default + 1 more... /*
inbound|8080|| * /*
# Goal: distribute traffic across the two versions on demand. The default configuration cannot achieve this, so below we customize the route behind the listener/cluster/route set that the demoapp service represents
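If a proportional split were wanted instead of path-based matching, weights on a single route would achieve it. A minimal sketch (illustrative only; it targets the same hosts entry, so it would be applied instead of, not alongside, the VirtualService defined in the next step):
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: demoapp
spec:
  hosts:
  - demoapp
  http:
  - name: weighted
    route:
    - destination:
        host: demoappv10
      weight: 90   # 90% of traffic stays on v1.0
    - destination:
        host: demoappv11
      weight: 10   # 10% shifts to v1.1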
6. Use a VirtualService to define the route of demoapp as accessed by the frontend proxy
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/02-demoapp-v11# cat virutalservice-demoapp.yaml
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  # name of the VirtualService resource
  name: demoapp
spec:
  # advanced configuration for the auto-generated demoapp virtual host;
  # must match a DOMAINS entry shown by istioctl proxy-config routes $DEMOAPP
  # applies to access to the demoapp Service
  hosts:
  - demoapp
  # layer-7 routing rules
  http:
  # routing rules form a list; multiple entries are allowed
  # give the route a name
  - name: canary
    # match condition
    match:   # rewrite demoapp/canary to demoapp/
    - uri:
        prefix: /canary
    rewrite:
      uri: /
    # routing target
    route:
    - destination:
        host: demoappv11   # dispatch to the demoappv11 cluster
  # default route; handles whatever /canary did not match
  - name: default
    # routing target
    route:
    - destination:
        host: demoappv10   # dispatch to the demoappv10 cluster
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/02-demoapp-v11# kubectl apply -f virutalservice-demoapp.yaml
virtualservice.networking.istio.io/demoapp created
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/02-demoapp-v11# kubectl get vs
NAME GATEWAYS HOSTS AGE
demoapp ["demoapp"] 6s
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/02-demoapp-v11# istioctl proxy-config routes $DEMOAPP | grep demoapp
80 demoapp.default.svc.cluster.local /canary* demoapp.default
80 demoapp.default.svc.cluster.local /* demoapp.default
8080 demoapp, demoapp.default + 1 more... /canary* demoapp.default
8080 demoapp, demoapp.default + 1 more... /* demoapp.default
8080 demoappv10, demoappv10.default + 1 more... /*
8080 demoappv11, demoappv11.default + 1 more... /*
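To see the rewrite behind that table, the full route can be dumped as JSON (a sketch; the canary entry should carry the prefix rewrite to "/"):
# istioctl proxy-config routes $DEMOAPP --name 8080 -o json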
7. Access the frontend proxy from the client (note: it is the egress listener that actually performs mesh traffic scheduling)
# Requests to the /canary path are all routed to the demoappv11 cluster
root@client # while true;do curl proxy/canary; sleep 0.$RANDOM; done
Proxying value: iKubernetes demoapp v1.1 !! ClientIP: 127.0.0.6, ServerName: demoappv11-7984f579f5-9bzmv, ServerIP: 10.220.104.173!
- Took 26 milliseconds.
Proxying value: iKubernetes demoapp v1.1 !! ClientIP: 127.0.0.6, ServerName: demoappv11-7984f579f5-qsw5z, ServerIP: 10.220.104.175!
- Took 24 milliseconds.
Proxying value: iKubernetes demoapp v1.1 !! ClientIP: 127.0.0.6, ServerName: demoappv11-7984f579f5-9bzmv, ServerIP: 10.220.104.173!
- Took 4 milliseconds.
Proxying value: iKubernetes demoapp v1.1 !! ClientIP: 127.0.0.6, ServerName: demoappv11-7984f579f5-qsw5z, ServerIP: 10.220.104.175!
# Requests to any path other than /canary are all routed to the demoappv10 cluster
root@client # while true;do curl proxy/hostname; sleep 0.$RANDOM; done
Proxying value: ServerName: demoappv10-5c497c6f7c-fdwf4
- Took 24 milliseconds.
Proxying value: ServerName: demoappv10-5c497c6f7c-ks5hk
- Took 15 milliseconds.
Proxying value: ServerName: demoappv10-5c497c6f7c-24dk4
- Took 7 milliseconds.
Proxying value: ServerName: demoappv10-5c497c6f7c-ks5hk
- Took 7 milliseconds.
Proxying value: ServerName: demoappv10-5c497c6f7c-24dk4
- Took 8 milliseconds.
- The Kiali graph renders the service topology in real time based on traffic
Food for thought 🤔:
- Defining a VirtualService supplements and upgrades the default routing configuration;
- Still, this setup is not elegant: demoapp is essentially a single service that happens to be deployed as the two versions demoappv10 and demoappv11. How can they be unified under one service name and consumed as different subsets? That requires the DestinationRule shown in the next example;
1.3 Simple Feature Case V3 - subset
1.3.1 Case Diagram
- Two applications
  - frontend (proxy): the front-end application; it requests the back-end demoapp;
  - demoapp: the back-end application, deployed in two versions at the same time;
1.3.2 Case Experiment
# For reference, the demoappv10 and demoappv11 manifests used above
# cat deploy-demoapp-v10.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: demoappv10
    version: v1.0
  name: demoappv10
spec:
  progressDeadlineSeconds: 600
  replicas: 2
  selector:
    matchLabels:
      app: demoapp
      version: v1.0
  template:
    metadata:
      labels:
        app: demoapp
        version: v1.0
    spec:
      containers:
      - image: registry.cn-wulanchabu.aliyuncs.com/daizhe/native/demoapp:v1.0
        imagePullPolicy: IfNotPresent
        name: demoapp
        env:
        - name: "PORT"
          value: "8080"
        ports:
        - containerPort: 8080
          name: web
          protocol: TCP
        resources:
          limits:
            cpu: 50m
---
apiVersion: v1
kind: Service
metadata:
  name: demoappv10
spec:
  ports:
  - name: http-8080
    port: 8080
    protocol: TCP
    targetPort: 8080
  selector:
    app: demoapp
    version: v1.0
  type: ClusterIP
# cat deploy-demoapp-v11.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: demoappv11
    version: v1.1
  name: demoappv11
spec:
  progressDeadlineSeconds: 600
  replicas: 2
  selector:
    matchLabels:
      app: demoapp
      version: v1.1
  template:
    metadata:
      labels:
        app: demoapp
        version: v1.1
    spec:
      containers:
      - image: registry.cn-wulanchabu.aliyuncs.com/daizhe/native/demoapp:v1.1
        imagePullPolicy: IfNotPresent
        name: demoapp
        env:
        - name: "PORT"
          value: "8080"
        ports:
        - containerPort: 8080
          name: web
          protocol: TCP
        resources:
          limits:
            cpu: 50m
---
apiVersion: v1
kind: Service
metadata:
  name: demoappv11
spec:
  ports:
  - name: http-8080
    port: 8080
    protocol: TCP
    targetPort: 8080
  selector:
    app: demoapp
    version: v1.1
  type: ClusterIP
1. Define the DestinationRule
# The DestinationRule's main job here is to split the 5 backend Pods matched by the demoapp Service into two groups, v10 and v11, known as two subsets;
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/03-demoapp-subset# cat destinationrule-demoapp.yaml
apiVersion: networking.istio.io/v1beta1
kind: DestinationRule
metadata:
  # DR name; usually kept identical to the Service name, indicating that it upgrades that Service's configuration
  name: demoapp
spec:
  # host: targets access to the demoapp Service
  host: demoapp
  # subset partitioning strategy: label selectors divide the backend Pods into logical groups
  subsets:
  # logical group name
  - name: v10
    # on top of the Service's own selector, these labels classify endpoints into the v10 subset
    labels:
      version: v1.0
  - name: v11
    # on top of the Service's own selector, these labels classify endpoints into the v11 subset
    labels:
      version: v1.1
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/03-demoapp-subset# kubectl apply -f destinationrule-demoapp.yaml
destinationrule.networking.istio.io/demoapp created
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/03-demoapp-subset# kubectl get DestinationRule
NAME HOST AGE
demoapp demoapp 2m42s
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/03-demoapp-subset# istioctl proxy-config clusters $DEMOAPP
SERVICE FQDN PORT SUBSET DIRECTION TYPE DESTINATION RULE
8080 - inbound ORIGINAL_DST
BlackHoleCluster - - - STATIC
InboundPassthroughClusterIpv4 - - - ORIGINAL_DST
PassthroughCluster - - - ORIGINAL_DST
agent - - - STATIC
dashboard-metrics-scraper.kubernetes-dashboard.svc.cluster.local 8000 - outbound EDS
demoapp.default.svc.cluster.local 8080 - outbound EDS demoapp.default
demoapp.default.svc.cluster.local 8080 v10 outbound EDS demoapp.default
demoapp.default.svc.cluster.local 8080 v11 outbound EDS demoapp.default
demoappv10.default.svc.cluster.local 8080 - outbound EDS
demoappv11.default.svc.cluster.local 8080 - outbound EDS
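Subset clusters can be resolved to their member endpoints as well (a sketch; Istio names outbound clusters as outbound|port|subset|FQDN):
# istioctl proxy-config endpoints $DEMOAPP --cluster "outbound|8080|v10|demoapp.default.svc.cluster.local"
# istioctl proxy-config endpoints $DEMOAPP --cluster "outbound|8080|v11|demoapp.default.svc.cluster.local"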
2. Adjust the VirtualService (the demoappv10 and demoappv11 Services can now be omitted; defining the single demoapp Service is enough)
# Use a VirtualService to route the frontend proxy's access to demoapp
# A VirtualService's main job is to define routing rules and distribute them to every sidecar proxy in the cluster;
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/03-demoapp-subset# cat virutalservice-demoapp.yaml
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: demoapp
spec:
  hosts:
  - demoapp
  http:
  - name: canary
    # match condition
    match:
    - uri:
        prefix: /canary
    rewrite:
      uri: /
    # routing target
    route:
    - destination:
        # dispatch to the v11 subset of the demoapp cluster
        host: demoapp
        # subset
        subset: v11
  - name: default
    route:
    - destination:
        # dispatch to the v10 subset of the demoapp cluster
        host: demoapp
        # subset
        subset: v10
# the VirtualService created earlier must be deleted first: kubectl delete vs demoapp
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/03-demoapp-subset# kubectl apply -f virutalservice-demoapp.yaml
virtualservice.networking.istio.io/demoapp created
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/03-demoapp-subset# istioctl proxy-config routes $DEMOAPP | grep demoapp
80 demoapp.default.svc.cluster.local /canary* demoapp.default
80 demoapp.default.svc.cluster.local /* demoapp.default
8080 demoapp, demoapp.default + 1 more... /canary* demoapp.default
8080 demoapp, demoapp.default + 1 more... /* demoapp.default
8080 demoappv10, demoappv10.default + 1 more... /*
8080 demoappv11, demoappv11.default + 1 more... /*
3. Access the frontend proxy from the client (note: it is the egress listener that actually performs mesh traffic scheduling)
# kubectl run client --image=registry.cn-wulanchabu.aliyuncs.com/daizhe/admin-box -it --rm --restart=Never --command -- /bin/sh
# Requests to the /canary path are all routed to the v11 subset of the demoapp cluster
root@client # while true;do curl proxy/canary; sleep 0.$RANDOM; done
Proxying value: iKubernetes demoapp v1.1 !! ClientIP: 127.0.0.6, ServerName: demoappv11-7984f579f5-qsw5z, ServerIP: 10.220.104.175!
- Took 9 milliseconds.
Proxying value: iKubernetes demoapp v1.1 !! ClientIP: 127.0.0.6, ServerName: demoappv11-7984f579f5-9bzmv, ServerIP: 10.220.104.173!
- Took 7 milliseconds.
Proxying value: iKubernetes demoapp v1.1 !! ClientIP: 127.0.0.6, ServerName: demoappv11-7984f579f5-9bzmv, ServerIP: 10.220.104.173!
- Took 8 milliseconds.
Proxying value: iKubernetes demoapp v1.1 !! ClientIP: 127.0.0.6, ServerName: demoappv11-7984f579f5-qsw5z, ServerIP: 10.220.104.175!
- Took 8 milliseconds.
# Requests to any path other than /canary are all routed to the v10 subset of the demoapp cluster
root@client # while true;do curl proxy/hostname; sleep 0.$RANDOM; done
Proxying value: ServerName: demoappv10-5c497c6f7c-fdwf4
- Took 10 milliseconds.
Proxying value: ServerName: demoappv10-5c497c6f7c-fdwf4
- Took 6 milliseconds.
Proxying value: ServerName: demoappv10-5c497c6f7c-ks5hk
- Took 7 milliseconds.
Proxying value: ServerName: demoappv10-5c497c6f7c-24dk4
- Took 8 milliseconds.
Proxying value: ServerName: demoappv10-5c497c6f7c-ks5hk
- Took 8 milliseconds.
- The Kiali graph renders the service topology in real time based on traffic
# The demoappv10 and demoappv11 Services created earlier can now be removed; the DestinationRule above has created the subsets;
# So for any future multi-version scenario: define one Service that covers the Pods of all versions, give each version its own version label, and use a DestinationRule to split subsets by that label (see the sketch below);
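The subsets also allow routing on request attributes other than the URI, such as steering only requests that carry a particular header to v11. A minimal sketch (the x-canary header name is made up for illustration; applying it would replace the /canary VirtualService above):
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: demoapp
spec:
  hosts:
  - demoapp
  http:
  - name: header-canary
    match:
    - headers:
        x-canary:
          exact: "true"
    route:
    - destination:
        host: demoapp
        subset: v11
  - name: default
    route:
    - destination:
        host: demoapp
        subset: v10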
# After the Services are deleted, their auto-generated clusters are cleaned up as well
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/03-demoapp-subset# kubectl delete service demoappv10 demoappv11
service "demoappv10" deleted
service "demoappv11" deleted
1.4 Simple Feature Case V4 - exposing the proxy service via a Gateway
1.4.1 Case Diagram
- Use a Gateway resource to expose proxy outside the cluster;
- proxy-gateway -> virtualservice/proxy -> destinationrule/proxy (or service/proxy) -> MESH (an optional destinationrule/proxy is sketched below)
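The destinationrule/proxy hop is optional and is not created in this walkthrough; if the proxy cluster needed an explicit traffic policy, a minimal sketch could look like this (the policy shown is illustrative only):
apiVersion: networking.istio.io/v1beta1
kind: DestinationRule
metadata:
  name: proxy
spec:
  host: proxy
  trafficPolicy:
    loadBalancer:
      simple: ROUND_ROBIN   # illustrative load-balancing policy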
1.4.2 Case Experiment
1. Create a Gateway resource to introduce traffic from outside the cluster into the mesh for the frontend
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/04-proxy-gateway# cat gateway-proxy.yaml
apiVersion: networking.istio.io/v1beta1
kind: Gateway
metadata:
  name: proxy-gateway
  namespace: istio-system   # must be the namespace where the ingress gateway pod runs, otherwise the configuration will not take effect
spec:
  # which ingress gateway this Gateway resource attaches to
  selector:
    app: istio-ingressgateway
  # virtual hosts
  servers:
  # port
  - port:
      # the port clients access
      number: 80
      # port name; the "http" prefix marks this socket for layer-7 proxying, names not in this format fall back to layer-4 proxying
      name: http
      # matches the name above (HTTP/TCP)
      protocol: HTTP
    # host names to match
    hosts:
    - "fe.pyenc.com"
2. Define a VirtualService for proxy on the ingress gateway
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/04-proxy-gateway# cat virtualservice-proxy.yaml
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: proxy
spec:
  # binds these routes to a virtual host; hosts must match, or be covered by, the hosts in the Gateway
  hosts:
  - "fe.pyenc.com"   # corresponds to gateways/proxy-gateway
  # (association with the gateway)
  # gateways declares that this VirtualService handles inbound traffic received on the ingress gateway, referenced by Gateway name
  gateways:
  - istio-system/proxy-gateway   # these definitions apply only on the ingress gateway
  #- mesh   # "mesh" would make it effective inside the mesh too (traffic from outside is admitted, in-mesh clients can also reach it, and routes are generated on every in-mesh sidecar proxy)
  # HTTP routes
  http:
  # route name
  - name: default
    # routing target
    route:
    - destination:
        host: proxy
        # the proxy cluster is auto-generated because a Service of the same name exists in the cluster, and it already exists on the ingress gateway
        # this is the in-cluster Service name, but traffic is not sent to the Service itself; it goes to the cluster formed from the Service's endpoints (layer-7 scheduling bypasses the Service)
# capture the ingress-gateway pod name for later use
# InGW=$(kubectl get pods -n istio-system -l app=istio-ingressgateway -o jsonpath={.items[0].metadata.name})
# echo $InGW
istio-ingressgateway-78f69bd5db-wd5gw
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/04-proxy-gateway# istioctl proxy-config clusters $InGW -n istio-system | grep proxy
proxy.default.svc.cluster.local 80 - outbound EDS
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/04-proxy-gateway# kubectl apply -f .
gateway.networking.istio.io/proxy-gateway created
virtualservice.networking.istio.io/proxy created
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/04-proxy-gateway# kubectl get gw -n istio-system
NAME AGE
grafana-gateway 2d9h
kiali-gateway 2d20h
proxy-gateway 28s
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/04-proxy-gateway# kubectl get vs
NAME GATEWAYS HOSTS AGE
demoapp ["demoapp"] 159m
proxy ["istio-system/proxy-gateway"] ["fe.pyenc.com"] 49s
# Inspect the routes defined on the ingress gateway
root@native01:~/istio/istio-in-practise/Traffic-Management-Basics/ms-demo/04-proxy-gateway# istioctl proxy-config routes $InGW -n istio-system
NAME DOMAINS MATCH VIRTUAL SERVICE
http.8080 fe.pyenc.com /* proxy.default
http.8080 grafana.pyenv.cc /* grafana-virtualservice.istio-system
http.8080 kiali.pyenv.com /* kiali-virtualservice.istio-system
* /stats/prometheus*
* /healthz/ready*
3. Access proxy from outside the cluster
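A typical test resolves fe.pyenc.com to the ingress gateway and sends requests through it. A sketch (how istio-ingressgateway is exposed is environment-specific; the port name http2 is the default in standard installs):
# with a LoadBalancer address:
# INGRESS_HOST=$(kubectl -n istio-system get svc istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
# curl -H "Host: fe.pyenc.com" http://$INGRESS_HOST/
# with a NodePort setup:
# INGRESS_PORT=$(kubectl -n istio-system get svc istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}')
# curl -H "Host: fe.pyenc.com" http://<node-ip>:$INGRESS_PORT/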
4. The Kiali graph renders the service topology in real time based on traffic