Istio-跨网络多主架构(实现多集群的流量治理)(2023-11-12)

跨网络多主架构的安装(2023-11-12)

使用 Istio 连接多个集群

实现跨多集群的流量治理

k8s版本:k8s-1.28.2

容器运行时版本:containerd-1.6.24

网络插件:calico-v3.26.1

istio版本:istio-1.19.3

这里准备四个 k8s 集群(假设四个集群分别部署运行在北上广深区域)

前提条件:四个集群的API-SERVER(kube-apiserver)需要互通!!!

每一个集群的istio都有独立的控制平面、独立的网络

准备四个 k8s 集群

k8s-master-beijing		192.168.1.201
k8s-node-beijing		192.168.1.202

k8s-master-shanghai		192.168.1.203
k8s-node-shanghai		192.168.1.204

k8s-master-guangzhou	192.168.1.205
k8s-node-guangzhou		192.168.1.206

k8s-master-shenzhen		192.168.1.207
k8s-node-shenzhen		192.168.1.208

# Map every node's hostname to its IP; run this on all hosts so the
# cluster hostnames used throughout this guide resolve everywhere.
# (Fixed: stray trailing tabs removed from several hostname entries.)
cat >> /etc/hosts << EOF
192.168.1.201 k8s-master-beijing
192.168.1.202 k8s-node-beijing
192.168.1.203 k8s-master-shanghai
192.168.1.204 k8s-node-shanghai
192.168.1.205 k8s-master-guangzhou
192.168.1.206 k8s-node-guangzhou
192.168.1.207 k8s-master-shenzhen
192.168.1.208 k8s-node-shenzhen
EOF
k8s-master-beijing
k8s-master-shanghai
k8s-master-guangzhou
k8s-master-shenzhen

cluster-beijing
cluster-shanghai
cluster-guangzhou
cluster-shenzhen
# kubectl get node
NAME                 STATUS   ROLES           AGE   VERSION
k8s-master-beijing   Ready    control-plane   80m   v1.28.2
k8s-node-beijing     Ready    <none>          79m   v1.28.2

# kubectl get node
NAME                   STATUS   ROLES           AGE   VERSION
k8s-master-guangzhou   Ready    control-plane   78m   v1.28.2
k8s-node-guangzhou     Ready    <none>          78m   v1.28.2

 # kubectl get node
NAME                  STATUS   ROLES           AGE   VERSION
k8s-master-shanghai   Ready    control-plane   79m   v1.28.2
k8s-node-shanghai     Ready    <none>          78m   v1.28.2

# kubectl get node
NAME                  STATUS   ROLES           AGE   VERSION
k8s-master-shenzhen   Ready    control-plane   78m   v1.28.2
k8s-node-shenzhen     Ready    <none>          77m   v1.28.2

使用 MetalLB 为 LoadBalancer 类型的服务分配 EXTERNAL-IP

在北京、上海、广州、深圳四个集群中分别安装 MetalLB

kubectl edit configmap -n kube-system kube-proxy
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
ipvs:
  strictARP: true
# Enable strictARP in kube-proxy's config (required for MetalLB L2 mode).
# see what changes would be made, returns nonzero returncode if different
kubectl get configmap kube-proxy -n kube-system -o yaml | \
sed -e "s/strictARP: false/strictARP: true/" | \
kubectl diff -f - -n kube-system

# actually apply the changes, returns nonzero returncode on errors only
kubectl get configmap kube-proxy -n kube-system -o yaml | \
sed -e "s/strictARP: false/strictARP: true/" | \
kubectl apply -f - -n kube-system

配置 MetalLB 为Layer2模式

kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.13.12/config/manifests/metallb-native.yaml
# Create the MetalLB IP address pool on k8s-master-beijing (L2 mode):
# an IPAddressPool plus an L2Advertisement announcing it.
cat > iptool.yaml << 'EOF'
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: default
  namespace: metallb-system
spec:
  addresses:
  - 192.168.1.100-192.168.1.119 # 网段跟node节点保持一致
  autoAssign: true
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default
  namespace: metallb-system
spec:
  ipAddressPools:
  - default
EOF

kubectl apply -f iptool.yaml -n metallb-system
# kubectl  get IPAddressPool -n metallb-system
NAME      AUTO ASSIGN   AVOID BUGGY IPS   ADDRESSES
default   true          false             ["192.168.1.100-192.168.1.119"]
# Create the MetalLB IP address pool on k8s-master-shanghai
# (disjoint range from the other clusters' pools).
cat > iptool.yaml << 'EOF'
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: default
  namespace: metallb-system
spec:
  addresses:
  - 192.168.1.120-192.168.1.139 # 网段跟node节点保持一致
  autoAssign: true
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default
  namespace: metallb-system
spec:
  ipAddressPools:
  - default
EOF

kubectl apply -f iptool.yaml -n metallb-system
# kubectl  get IPAddressPool -n metallb-system
NAME      AUTO ASSIGN   AVOID BUGGY IPS   ADDRESSES
default   true          false             ["192.168.1.120-192.168.1.139"]
# Create the MetalLB IP address pool on k8s-master-guangzhou
# (disjoint range from the other clusters' pools).
cat > iptool.yaml << 'EOF'
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: default
  namespace: metallb-system
spec:
  addresses:
  - 192.168.1.140-192.168.1.159 # 网段跟node节点保持一致
  autoAssign: true
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default
  namespace: metallb-system
spec:
  ipAddressPools:
  - default
EOF

kubectl apply -f iptool.yaml -n metallb-system
# kubectl  get IPAddressPool -n metallb-system
NAME      AUTO ASSIGN   AVOID BUGGY IPS   ADDRESSES
default   true          false             ["192.168.1.140-192.168.1.159"]
# Create the MetalLB IP address pool on k8s-master-shenzhen
# (disjoint range from the other clusters' pools).
cat > iptool.yaml << 'EOF'
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: default
  namespace: metallb-system
spec:
  addresses:
  - 192.168.1.160-192.168.1.179 # 网段跟node节点保持一致
  autoAssign: true
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: default
  namespace: metallb-system
spec:
  ipAddressPools:
  - default
EOF

kubectl apply -f iptool.yaml -n metallb-system
# kubectl  get IPAddressPool -n metallb-system
NAME      AUTO ASSIGN   AVOID BUGGY IPS   ADDRESSES
default   true          false             ["192.168.1.160-192.168.1.179"]

合并多个kubeconfig

# Collect every cluster's admin kubeconfig onto this host for merging.
mkdir -p ~/kubeconfig
scp k8s-master-beijing:~/.kube/config ~/kubeconfig/k8s-master-beijing-kubeconfig
scp k8s-master-shanghai:~/.kube/config ~/kubeconfig/k8s-master-shanghai-kubeconfig
scp k8s-master-guangzhou:~/.kube/config ~/kubeconfig/k8s-master-guangzhou-kubeconfig
scp k8s-master-shenzhen:~/.kube/config ~/kubeconfig/k8s-master-shenzhen-kubeconfig
# cat k8s-master-beijing-kubeconfig
# cat k8s-master-shanghai-kubeconfig
# cat k8s-master-guangzhou-kubeconfig
# cat k8s-master-shenzhen-kubeconfig
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: ...
    server: https://x.x.x.x:6443
  name: k8s-master-beijing # 修改
contexts:
- context:
    cluster: k8s-master-beijing # 修改
    namespace: default
    user: k8s-master-beijing # 修改
  name: k8s-master-beijing@kubernetes # 修改
current-context: k8s-master-beijing@kubernetes # 修改
kind: Config
preferences: {}
users:
- name: k8s-master-beijing # 修改
  user:
    client-certificate-data: ...
    client-key-data: ...
# Merge the four (renamed) kubeconfigs into a single flattened file.
KUBECONFIG=k8s-master-beijing-kubeconfig:k8s-master-shanghai-kubeconfig:k8s-master-guangzhou-kubeconfig:k8s-master-shenzhen-kubeconfig kubectl config view --flatten  > kubeconfig

kubectl config get-contexts --kubeconfig=kubeconfig
kubectl config get-contexts

cp kubeconfig /root/.kube/config

# Switch clusters by context name. The contexts were renamed above to
# '<cluster>@kubernetes', so use those names here — the original
# 'kubernetes-admin@kubernetes' / 'kubernetes-admin-203@kubernetes'
# names no longer exist after the rename.
kubectl config use-context k8s-master-beijing@kubernetes --kubeconfig=/root/.kube/config
kubectl config use-context k8s-master-shanghai@kubernetes --kubeconfig=/root/.kube/config

为 k8s-master-beijing、k8s-master-shanghai、k8s-master-guangzhou、k8s-master-shenzhen设置上下文

# One environment variable per cluster holding its kubectl context name;
# every later command references these as "${k8s_master_<city>}".
# Fixed: the shanghai line used 'k8s-master_shanghai' — shell variable
# names may not contain '-', so that export was a syntax error and every
# "${k8s_master_shanghai}" reference below would have been empty.
export k8s_master_beijing=k8s-master-beijing@kubernetes
export k8s_master_shanghai=k8s-master-shanghai@kubernetes
export k8s_master_guangzhou=k8s-master-guangzhou@kubernetes
export k8s_master_shenzhen=k8s-master-shenzhen@kubernetes

以下所有操作在 k8s-master-beijing上操作即可

安装 istioctl

cd && wget https://github.com/istio/istio/releases/download/1.19.3/istio-1.19.3-linux-amd64.tar.gz
tar xf istio-1.19.3-linux-amd64.tar.gz

cp ~/istio-1.19.3/bin/istioctl /usr/bin/istioctl
# istioctl version
no ready Istio pods in "istio-system"
1.19.3

istioctl 命令补全

yum -y install bash-completion

source /etc/profile.d/bash_completion.sh

cp ~/istio-1.19.3/tools/istioctl.bash ~/.istioctl.bash

source ~/.istioctl.bash

k8s-master-beijing设置缺省网络

kubectl create ns istio-system --context="${k8s_master_beijing}"

kubectl --context="${k8s_master_beijing}" label namespace istio-system topology.istio.io/network=k8s-master-beijing-network

kubectl  get ns --show-labels --context="${k8s_master_beijing}"

k8s-master-shanghai设置缺省网络

kubectl create ns istio-system --context="${k8s_master_shanghai}"

kubectl --context="${k8s_master_shanghai}" label namespace istio-system topology.istio.io/network=k8s-master-shanghai-network

kubectl  get ns --show-labels --context="${k8s_master_shanghai}"

k8s-master-guangzhou设置缺省网络

kubectl create ns istio-system --context="${k8s_master_guangzhou}"

kubectl --context="${k8s_master_guangzhou}" label namespace istio-system topology.istio.io/network=k8s-master-guangzhou-network

kubectl  get ns --show-labels --context="${k8s_master_guangzhou}"

k8s-master-shenzhen设置缺省网络

kubectl create ns istio-system --context="${k8s_master_shenzhen}"

kubectl --context="${k8s_master_shenzhen}" label namespace istio-system topology.istio.io/network=k8s-master-shenzhen-network

kubectl  get ns --show-labels --context="${k8s_master_shenzhen}"

为 k8s-master-beijing、k8s-master-shanghai、k8s-master-guangzhou、k8s-master-shenzhen添加证书

# Generate one shared root CA plus a per-cluster intermediate CA so all
# four meshes chain to the same root (required for cross-cluster mTLS).
mkdir -p ~/certs && cd ~/certs
make -f ~/istio-1.19.3/tools/certs/Makefile.selfsigned.mk root-ca
make -f ~/istio-1.19.3/tools/certs/Makefile.selfsigned.mk k8s-master-beijing-cacerts

make -f ~/istio-1.19.3/tools/certs/Makefile.selfsigned.mk k8s-master-shanghai-cacerts

make -f ~/istio-1.19.3/tools/certs/Makefile.selfsigned.mk k8s-master-guangzhou-cacerts

make -f ~/istio-1.19.3/tools/certs/Makefile.selfsigned.mk k8s-master-shenzhen-cacerts
# Install each cluster's intermediate CA as the 'cacerts' secret that
# istiod reads from istio-system at startup.
kubectl create secret generic cacerts -n istio-system \
      --from-file=k8s-master-beijing/ca-cert.pem \
      --from-file=k8s-master-beijing/ca-key.pem \
      --from-file=k8s-master-beijing/root-cert.pem \
      --from-file=k8s-master-beijing/cert-chain.pem \
      --context="${k8s_master_beijing}"
kubectl create secret generic cacerts -n istio-system \
      --from-file=k8s-master-shanghai/ca-cert.pem \
      --from-file=k8s-master-shanghai/ca-key.pem \
      --from-file=k8s-master-shanghai/root-cert.pem \
      --from-file=k8s-master-shanghai/cert-chain.pem \
      --context="${k8s_master_shanghai}"
kubectl create secret generic cacerts -n istio-system \
      --from-file=k8s-master-guangzhou/ca-cert.pem \
      --from-file=k8s-master-guangzhou/ca-key.pem \
      --from-file=k8s-master-guangzhou/root-cert.pem \
      --from-file=k8s-master-guangzhou/cert-chain.pem \
      --context="${k8s_master_guangzhou}"
kubectl create secret generic cacerts -n istio-system \
      --from-file=k8s-master-shenzhen/ca-cert.pem \
      --from-file=k8s-master-shenzhen/ca-key.pem \
      --from-file=k8s-master-shenzhen/root-cert.pem \
      --from-file=k8s-master-shenzhen/cert-chain.pem \
      --context="${k8s_master_shenzhen}"
mkdir -p ~/istio-yml

1、将 k8s-master-beijing设为主集群

k8s-master-beijing 创建 Istio 配置文件:

cat > ~/istio-yml/k8s-master-beijing.yml << 'EOF' 
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
  values:
    global:
      meshID: mesh1
      multiCluster:
        clusterName: k8s-master-beijing
      network: k8s-master-beijing-network
EOF
istioctl install --context="${k8s_master_beijing}" -f ~/istio-yml/k8s-master-beijing.yml

k8s-master-beijing安装东西向网关

~/istio-1.19.3/samples/multicluster/gen-eastwest-gateway.sh \
--mesh mesh1 --cluster k8s-master-beijing --network k8s-master-beijing-network | \
istioctl --context="${k8s_master_beijing}" install -y -f -

开放 k8s-master-beijing中的服务

kubectl --context="${k8s_master_beijing}" apply -n istio-system -f \
~/istio-1.19.3/samples/multicluster/expose-services.yaml
    
kubectl get gateways.networking.istio.io --context="${k8s_master_beijing}" -n istio-system

2、将 k8s-master-shanghai设为主集群

k8s-master-shanghai创建 Istio 配置文件:

cat > ~/istio-yml/k8s-master-shanghai.yml << 'EOF' 
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
  values:
    global:
      meshID: mesh1
      multiCluster:
        clusterName: k8s-master-shanghai
      network: k8s-master-shanghai-network
EOF
istioctl install --context="${k8s_master_shanghai}" -f ~/istio-yml/k8s-master-shanghai.yml

k8s-master-shanghai安装东西向网关

~/istio-1.19.3/samples/multicluster/gen-eastwest-gateway.sh \
--mesh mesh1 --cluster k8s-master-shanghai --network k8s-master-shanghai-network | \
istioctl --context="${k8s_master_shanghai}" install -y -f -

开放 k8s-master-shanghai中的服务

kubectl --context="${k8s_master_shanghai}" apply -n istio-system -f \
~/istio-1.19.3/samples/multicluster/expose-services.yaml

kubectl get gateways.networking.istio.io --context="${k8s_master_shanghai}" -n istio-system

3、将 k8s-master-guangzhou设为主集群

k8s-master-guangzhou 创建 Istio 配置文件:

cat > ~/istio-yml/k8s-master-guangzhou.yml << 'EOF' 
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
  values:
    global:
      meshID: mesh1
      multiCluster:
        clusterName: k8s-master-guangzhou
      network: k8s-master-guangzhou-network
EOF
istioctl install --context="${k8s_master_guangzhou}" -f ~/istio-yml/k8s-master-guangzhou.yml

k8s-master-guangzhou安装东西向网关

~/istio-1.19.3/samples/multicluster/gen-eastwest-gateway.sh \
--mesh mesh1 --cluster k8s-master-guangzhou --network k8s-master-guangzhou-network | \
istioctl --context="${k8s_master_guangzhou}" install -y -f -

开放 k8s-master-guangzhou中的服务

kubectl --context="${k8s_master_guangzhou}" apply -n istio-system -f \
~/istio-1.19.3/samples/multicluster/expose-services.yaml
    
kubectl get gateways.networking.istio.io --context="${k8s_master_guangzhou}" -n istio-system

4、将 k8s-master-shenzhen设为主集群

k8s-master-shenzhen创建 Istio 配置文件:

cat > ~/istio-yml/k8s-master-shenzhen.yml << 'EOF' 
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
  values:
    global:
      meshID: mesh1
      multiCluster:
        clusterName: k8s-master-shenzhen
      network: k8s-master-shenzhen-network
EOF
istioctl install --context="${k8s_master_shenzhen}" -f ~/istio-yml/k8s-master-shenzhen.yml

k8s-master-shenzhen安装东西向网关

~/istio-1.19.3/samples/multicluster/gen-eastwest-gateway.sh \
--mesh mesh1 --cluster k8s-master-shenzhen --network k8s-master-shenzhen-network | \
istioctl --context="${k8s_master_shenzhen}" install -y -f -

开放 k8s-master-shenzhen中的服务

kubectl --context="${k8s_master_shenzhen}" apply -n istio-system -f \
~/istio-1.19.3/samples/multicluster/expose-services.yaml

kubectl get gateways.networking.istio.io --context="${k8s_master_shenzhen}" -n istio-system

启用端点发现

1、在 k8s-master-beijing 中安装一个提供 k8s-master-shanghai、k8s-master-guangzhou、k8s-master-shenzhen的API Server 访问权限的远程 Secret,使k8s-master-beijing能够访问k8s-master-shanghai、k8s-master-guangzhou、k8s-master-shenzhen

# Endpoint discovery: give k8s-master-beijing's istiod credentials to
# watch the API servers of the other three clusters.
istioctl create-remote-secret \
  --context="${k8s_master_shanghai}" \
  --name=k8s-master-shanghai | \
  kubectl apply -f - --context="${k8s_master_beijing}"

  istioctl create-remote-secret \
  --context="${k8s_master_guangzhou}" \
  --name=k8s-master-guangzhou | \
  kubectl apply -f - --context="${k8s_master_beijing}"

  istioctl create-remote-secret \
  --context="${k8s_master_shenzhen}" \
  --name=k8s-master-shenzhen | \
  kubectl apply -f - --context="${k8s_master_beijing}"

2、在 k8s-master-shanghai 中安装一个提供 k8s-master-beijing、k8s-master-guangzhou、k8s-master-shenzhen 的 API Server 访问权限的远程 Secret,使 k8s-master-shanghai 能够访问其余三个集群

istioctl create-remote-secret \
  --context="${k8s_master_beijing}" \
  --name=k8s-master-beijing | \
  kubectl apply -f - --context="${k8s_master_shanghai}"
  
  istioctl create-remote-secret \
  --context="${k8s_master_guangzhou}" \
  --name=k8s-master-guangzhou | \
  kubectl apply -f - --context="${k8s_master_shanghai}"
  
  istioctl create-remote-secret \
  --context="${k8s_master_shenzhen}" \
  --name=k8s-master-shenzhen | \
  kubectl apply -f - --context="${k8s_master_shanghai}"

3、在 k8s-master-guangzhou 中安装一个提供 k8s-master-beijing、k8s-master-shanghai、k8s-master-shenzhen 的 API Server 访问权限的远程 Secret,使 k8s-master-guangzhou 能够访问其余三个集群

istioctl create-remote-secret \
  --context="${k8s_master_beijing}" \
  --name=k8s-master-beijing | \
  kubectl apply -f - --context="${k8s_master_guangzhou}"
  
  istioctl create-remote-secret \
  --context="${k8s_master_shanghai}" \
  --name=k8s-master-shanghai | \
  kubectl apply -f - --context="${k8s_master_guangzhou}"
  
  istioctl create-remote-secret \
  --context="${k8s_master_shenzhen}" \
  --name=k8s-master-shenzhen | \
  kubectl apply -f - --context="${k8s_master_guangzhou}"

4、在 k8s-master-shenzhen 中安装一个提供 k8s-master-beijing、k8s-master-shanghai、k8s-master-guangzhou 的 API Server 访问权限的远程 Secret,使 k8s-master-shenzhen 能够访问其余三个集群

istioctl create-remote-secret \
  --context="${k8s_master_beijing}" \
  --name=k8s-master-beijing | \
  kubectl apply -f - --context="${k8s_master_shenzhen}"
  
  istioctl create-remote-secret \
  --context="${k8s_master_shanghai}" \
  --name=k8s-master-shanghai | \
  kubectl apply -f - --context="${k8s_master_shenzhen}"
  
  istioctl create-remote-secret \
  --context="${k8s_master_guangzhou}" \
  --name=k8s-master-guangzhou | \
  kubectl apply -f - --context="${k8s_master_shenzhen}"
istioctl proxy-status
istioctl ps
istioctl proxy-config endpoints

istioctl remote-clusters

[root@k8s-master-beijing:~] # istioctl remote-clusters
NAME                     SECRET                                                    STATUS     ISTIOD
k8s-master-shanghai      istio-system/istio-remote-secret-k8s-master-shanghai      synced     istiod-7c854bcdd8-zt6dp
k8s-master-guangzhou     istio-system/istio-remote-secret-k8s-master-guangzhou     synced     istiod-7c854bcdd8-zt6dp
k8s-master-shenzhen      istio-system/istio-remote-secret-k8s-master-shenzhen      synced     istiod-7c854bcdd8-zt6dp


[root@k8s-master-shanghai:~] # istioctl remote-clusters
NAME                     SECRET                                                    STATUS     ISTIOD
k8s-master-beijing       istio-system/istio-remote-secret-k8s-master-beijing       synced     istiod-698679c99c-vskp8
k8s-master-guangzhou     istio-system/istio-remote-secret-k8s-master-guangzhou     synced     istiod-698679c99c-vskp8
k8s-master-shenzhen      istio-system/istio-remote-secret-k8s-master-shenzhen      synced     istiod-698679c99c-vskp8


[root@k8s-master-guangzhou:~] # istioctl remote-clusters
NAME                    SECRET                                                   STATUS     ISTIOD
k8s-master-beijing      istio-system/istio-remote-secret-k8s-master-beijing      synced     istiod-77cd9c98d-4b6zk
k8s-master-shanghai     istio-system/istio-remote-secret-k8s-master-shanghai     synced     istiod-77cd9c98d-4b6zk
k8s-master-shenzhen     istio-system/istio-remote-secret-k8s-master-shenzhen     synced     istiod-77cd9c98d-4b6zk


[root@k8s-master-shenzhen:~] # istioctl remote-clusters
NAME                     SECRET                                                    STATUS     ISTIOD
k8s-master-shanghai      istio-system/istio-remote-secret-k8s-master-shanghai      synced     istiod-7d5749c785-456n6
k8s-master-guangzhou     istio-system/istio-remote-secret-k8s-master-guangzhou     synced     istiod-7d5749c785-456n6
k8s-master-beijing       istio-system/istio-remote-secret-k8s-master-beijing       synced     istiod-7d5749c785-456n6

恭喜! 您在跨网络多主架构的集群上,成功的安装了 Istio 网格

验证安装结果

部署服务 HelloWorld

# Create the sample namespace in all four clusters and enable automatic
# sidecar injection on it.
kubectl create --context="${k8s_master_beijing}" namespace helloworld

kubectl create --context="${k8s_master_shanghai}" namespace helloworld

kubectl create --context="${k8s_master_guangzhou}" namespace helloworld

kubectl create --context="${k8s_master_shenzhen}" namespace helloworld
kubectl label --context="${k8s_master_beijing}" namespace helloworld \
    istio-injection=enabled

kubectl label --context="${k8s_master_shanghai}" namespace helloworld \
    istio-injection=enabled

kubectl label --context="${k8s_master_guangzhou}" namespace helloworld \
    istio-injection=enabled

kubectl label --context="${k8s_master_shenzhen}" namespace helloworld \
    istio-injection=enabled

在每个集群中创建 HelloWorld 服务

# The HelloWorld sample: one Service plus a single-replica Deployment,
# deployed identically to every cluster.
cat > helloworld.yml << 'EOF'
apiVersion: v1
kind: Service
metadata:
  name: helloworld
  labels:
    app: helloworld
    service: helloworld
spec:
  ports:
  - port: 80
    name: http
  selector:
    app: helloworld
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: helloworld
  labels:
    app: helloworld
spec:
  replicas: 1
  selector:
    matchLabels:
      app: helloworld
  template:
    metadata:
      labels:
        app: helloworld
    spec:
      containers:
      - name: helloworld
        image: ccr.ccs.tencentyun.com/huanghuanhui/helloworld:v1
        resources:
          requests:
            cpu: "100m"
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
EOF
# Apply the same manifest to each of the four clusters.
kubectl apply --context="${k8s_master_beijing}" -f helloworld.yml -n helloworld

kubectl apply --context="${k8s_master_shanghai}" -f helloworld.yml -n helloworld

kubectl apply --context="${k8s_master_guangzhou}" -f helloworld.yml -n helloworld

kubectl apply --context="${k8s_master_shenzhen}" -f helloworld.yml -n helloworld
# Give each cluster's helloworld Deployment a distinct image tag (v1..v4)
# so the serving cluster can be identified from the HTTP response.
# Fixed: dropped '--record' — deprecated since kubectl 1.22; if a change
# cause is wanted, set the kubernetes.io/change-cause annotation instead.
kubectl --context="${k8s_master_beijing}" set image deployment/helloworld helloworld=ccr.ccs.tencentyun.com/huanghuanhui/helloworld:v1 -n helloworld

kubectl --context="${k8s_master_shanghai}" set image deployment/helloworld helloworld=ccr.ccs.tencentyun.com/huanghuanhui/helloworld:v2 -n helloworld

kubectl --context="${k8s_master_guangzhou}" set image deployment/helloworld helloworld=ccr.ccs.tencentyun.com/huanghuanhui/helloworld:v3 -n helloworld

kubectl --context="${k8s_master_shenzhen}" set image deployment/helloworld helloworld=ccr.ccs.tencentyun.com/huanghuanhui/helloworld:v4 -n helloworld

开启envoy 的访问⽇志

# kubectl -n istio-system edit cm istio

kubectl -n istio-system get cm istio -o yaml
apiVersion: v1
data:
  mesh: |-
    defaultConfig:
      discoveryAddress: istiod.istio-system.svc:15012
      meshId: mesh1
      proxyMetadata: {}
      tracing:
        zipkin:
          address: zipkin.istio-system:9411
    defaultProviders:
      metrics:
      - prometheus
    enablePrometheusMerge: true
    rootNamespace: istio-system
    trustDomain: cluster.local
    accessLogFile: /dev/stdout # 新加配置,开启envoy 的访问⽇志
  meshNetworks: 'networks: {}'
kind: ConfigMap
metadata:
  creationTimestamp: "2023-11-12T11:22:50Z"
  labels:
    install.operator.istio.io/owning-resource: installed-state
    install.operator.istio.io/owning-resource-namespace: istio-system
    istio.io/rev: default
    operator.istio.io/component: Pilot
    operator.istio.io/managed: Reconcile
    operator.istio.io/version: 1.19.3
    release: istio
  name: istio
  namespace: istio-system
  resourceVersion: "28123"
  uid: 583affd9-1c38-4100-b127-ce5afca134c6

集群内验证跨集群流量

# In-cluster verification: hit the pod IP directly, then call the Service
# through the mesh. Repeated calls should reach instances in the other
# clusters too once endpoint discovery is synced (responses presumably
# rotate across v1..v4 — confirm on your setup).
curl -s \
"$(kubectl get pod -o jsonpath='{.items[0].status.podIP}' -n helloworld)"

kubectl exec -n helloworld \
"$(kubectl get pod -o jsonpath='{.items[0].metadata.name}' -n helloworld)" \
-- curl -s helloworld.helloworld:80


for i in {1..10}; do
  kubectl exec --context="${k8s_master_beijing}" -n helloworld \
    "$(kubectl get pod -o jsonpath='{.items[0].metadata.name}' -n helloworld)" \
    -- curl -s helloworld.helloworld:80
    sleep 1
done

集群外验证跨集群流量

# VirtualService: route requests for istio.huanghuanhui.cloud arriving on
# helloworld-gateway to the helloworld Service.
cat > helloworld-vsvc.yml << 'EOF'
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: helloworld-vsvc
  namespace: helloworld
spec:
  gateways:
  - helloworld-gateway
  hosts:
  - "istio.huanghuanhui.cloud"
  http:
  - match:
    - uri:
        exact: /
    route:
    - destination:
        host: helloworld
      weight: 100
EOF
# Gateway: accept external HTTP traffic on port 80 for the hostname.
# Fixed: written to its own file — the original wrote this to
# helloworld.yml, clobbering the Service/Deployment manifest created earlier.
cat > helloworld-gateway.yml << 'EOF'
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: helloworld-gateway
  namespace: helloworld
spec:
  selector:
    istio: ingressgateway # 默认创建的 istio ingressgateway pod 有这个 Label
  servers:
  - port:
      number: 80
      name: http
      protocol: HTTP
    hosts:
    - "istio.huanghuanhui.cloud"
EOF
# Apply both manifests (the original text never applied them).
kubectl apply --context="${k8s_master_beijing}" -f helloworld-vsvc.yml
kubectl apply --context="${k8s_master_beijing}" -f helloworld-gateway.yml

# Hit the external hostname repeatedly (assumes DNS for the hostname
# points at the ingress gateway's EXTERNAL-IP — verify on your setup).
for i in {1..10}; do
    curl -s istio.huanghuanhui.cloud
    sleep 1
done

清理

卸载 k8s-master-beijing 中的 Istio

istioctl uninstall --context="${k8s_master_beijing}" -y --purge

kubectl delete ns istio-system --context="${k8s_master_beijing}"

卸载 k8s-master-shanghai 中的 Istio

istioctl uninstall --context="${k8s_master_shanghai}" -y --purge

kubectl delete ns istio-system --context="${k8s_master_shanghai}"

卸载 k8s-master-guangzhou 中的 Istio

istioctl uninstall --context="${k8s_master_guangzhou}" -y --purge

kubectl delete ns istio-system --context="${k8s_master_guangzhou}"

卸载 k8s-master-shenzhen 中的 Istio

istioctl uninstall --context="${k8s_master_shenzhen}" -y --purge

kubectl delete ns istio-system --context="${k8s_master_shenzhen}"
  • 0
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值