一、概要
本文主要讲述了在k8s集群中如何使用yaml文件方式部署apisix及其相关组件。
- 安装etcd集群
- 安装apisix
- 安装apisix-dashboard
- 访问并验证apisix服务
二、安装etcd集群
本次安装etcd集群的命名空间为gv-public,副本数为3,使用的StorageClass存储为NFS类型,可根据自己的后端存储进行修改。
yaml文件etcd.yaml
# Namespace holding all APISIX components deployed by this guide.
apiVersion: v1
kind: Namespace
metadata:
  name: gv-public
---
# Three-member etcd cluster (bitnami image) used as the APISIX data store.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: apisix-etcd
  namespace: gv-public
  labels:
    app.kubernetes.io/instance: apisix-etcd
    app.kubernetes.io/name: apisix-etcd
spec:
  podManagementPolicy: Parallel  # start all members at once so the 3-node cluster can bootstrap
  replicas: 3
  serviceName: apisix-etcd-headless
  selector:
    matchLabels:
      app.kubernetes.io/instance: apisix-etcd
      app.kubernetes.io/name: apisix-etcd
  template:
    metadata:
      labels:
        app.kubernetes.io/instance: apisix-etcd
        app.kubernetes.io/name: apisix-etcd
    spec:
      affinity:
        # Prefer (soft) spreading etcd members across nodes for availability.
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/instance: apisix-etcd
                    app.kubernetes.io/name: apisix-etcd
                topologyKey: kubernetes.io/hostname
              weight: 1
      containers:
        - name: apisix-etcd-app
          image: bitnami/etcd:3.4.24
          # Mirror if the official image is unreachable:
          # registry.cn-hangzhou.aliyuncs.com/k8s_study_rfb/etcd:3.4.24-x86
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 2379
              name: client
              protocol: TCP
            - containerPort: 2380
              name: peer
              protocol: TCP
          env:
            - name: BITNAMI_DEBUG
              value: 'false'
            - name: MY_POD_IP
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: status.podIP
            - name: MY_POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
            - name: MY_STS_NAME
              value: apisix-etcd
            - name: ETCDCTL_API
              value: '3'
            - name: ETCD_ON_K8S
              value: 'yes'
            - name: ETCD_START_FROM_SNAPSHOT
              value: 'no'
            - name: ETCD_DISASTER_RECOVERY
              value: 'no'
            - name: ETCD_NAME
              value: $(MY_POD_NAME)
            - name: ETCD_DATA_DIR
              value: /bitnami/etcd/data
            - name: ETCD_LOG_LEVEL
              value: info
            # NOTE(review): authentication disabled — fine for a demo, lock down in production.
            - name: ALLOW_NONE_AUTHENTICATION
              value: 'yes'
            - name: ETCD_ADVERTISE_CLIENT_URLS
              value: http://$(MY_POD_NAME).apisix-etcd-headless.gv-public.svc.cluster.local:2379
            - name: ETCD_LISTEN_CLIENT_URLS
              value: http://0.0.0.0:2379
            - name: ETCD_INITIAL_ADVERTISE_PEER_URLS
              value: http://$(MY_POD_NAME).apisix-etcd-headless.gv-public.svc.cluster.local:2380
            - name: ETCD_LISTEN_PEER_URLS
              value: http://0.0.0.0:2380
            - name: ETCD_INITIAL_CLUSTER_TOKEN
              value: apisix-etcd-cluster-k8s
            - name: ETCD_INITIAL_CLUSTER_STATE
              value: new
            # Member list must match <replicas> and the headless Service name above.
            - name: ETCD_INITIAL_CLUSTER
              value: apisix-etcd-0=http://apisix-etcd-0.apisix-etcd-headless.gv-public.svc.cluster.local:2380,apisix-etcd-1=http://apisix-etcd-1.apisix-etcd-headless.gv-public.svc.cluster.local:2380,apisix-etcd-2=http://apisix-etcd-2.apisix-etcd-headless.gv-public.svc.cluster.local:2380
            - name: ETCD_CLUSTER_DOMAIN
              value: apisix-etcd-headless.gv-public.svc.cluster.local
          volumeMounts:
            - name: data
              mountPath: /bitnami/etcd
          lifecycle:
            preStop:
              exec:
                command:
                  - /opt/bitnami/scripts/etcd/prestop.sh
          livenessProbe:
            exec:
              command:
                - /opt/bitnami/scripts/etcd/healthcheck.sh
            initialDelaySeconds: 60
            timeoutSeconds: 5
            periodSeconds: 30
            successThreshold: 1
            failureThreshold: 5
          readinessProbe:
            exec:
              command:
                - /opt/bitnami/scripts/etcd/healthcheck.sh
            initialDelaySeconds: 60
            timeoutSeconds: 5
            periodSeconds: 10
            successThreshold: 1
            failureThreshold: 5
      securityContext:
        fsGroup: 1001  # make the PVC writable by the non-root bitnami user
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes:
          - ReadWriteOnce
        storageClassName: managed-nfs-storage  # change to your cluster's dynamic StorageClass
        resources:
          requests:
            storage: 1Gi
---
# Headless Service: gives each etcd pod a stable per-pod DNS name for peer discovery.
apiVersion: v1
kind: Service
metadata:
  name: apisix-etcd-headless
  namespace: gv-public
  labels:
    app.kubernetes.io/instance: apisix-etcd
    app.kubernetes.io/name: apisix-etcd
spec:
  clusterIP: None
  ports:
    - name: client
      port: 2379
      protocol: TCP
      targetPort: 2379
    - name: peer
      port: 2380
      protocol: TCP
      targetPort: 2380
  selector:
    app.kubernetes.io/instance: apisix-etcd
    app.kubernetes.io/name: apisix-etcd
  # Expose pod DNS records before readiness so members can find each other while bootstrapping.
  publishNotReadyAddresses: true
---
# Client-facing Service used by APISIX and the dashboard to reach the etcd cluster.
apiVersion: v1
kind: Service
metadata:
  name: apisix-etcd
  namespace: gv-public
  labels:
    app.kubernetes.io/instance: apisix-etcd
    app.kubernetes.io/name: apisix-etcd
spec:
  ports:
    - name: client
      port: 2379
      protocol: TCP
      targetPort: 2379
    - name: peer
      port: 2380
      protocol: TCP
      targetPort: 2380
  selector:
    app.kubernetes.io/instance: apisix-etcd
    app.kubernetes.io/name: apisix-etcd
创建etcd集群并查看
# kubectl apply -f etcd.yaml
namespace/gv-public created
statefulset.apps/apisix-etcd created
service/apisix-etcd-headless created
service/apisix-etcd created
# kubectl get po -n gv-public
NAME READY STATUS RESTARTS AGE
apisix-etcd-0 1/1 Running 0 94s
apisix-etcd-1 1/1 Running 0 94s
apisix-etcd-2 1/1 Running 0 94s
三、安装apisix
yaml文件apisix.yaml
# APISIX gateway, 2 replicas, configured via the `apisix` ConfigMap mounted as config.yaml.
kind: Deployment
apiVersion: apps/v1
metadata:
  name: apisix
  namespace: gv-public
  labels:
    app.kubernetes.io/instance: apisix
    app.kubernetes.io/name: apisix
    app.kubernetes.io/version: "3.9.0"  # kept in sync with the image tag below (was 2.10.0)
spec:
  replicas: 2
  selector:
    matchLabels:
      app.kubernetes.io/instance: apisix
      app.kubernetes.io/name: apisix
  template:
    metadata:
      labels:
        app.kubernetes.io/instance: apisix
        app.kubernetes.io/name: apisix
    spec:
      volumes:
        - name: apisix-config
          configMap:
            name: apisix
            defaultMode: 420
      initContainers:
        # Block startup until etcd answers on 2379 so APISIX does not crash-loop.
        - name: wait-etcd
          image: busybox:1.28
          # Mirror: registry.cn-hangzhou.aliyuncs.com/k8s_study_rfb/busybox:1.28-x86
          command:
            - sh
            - '-c'
            - >-
              until nc -z apisix-etcd.gv-public.svc.cluster.local 2379; do echo
              waiting for etcd `date`; sleep 2; done;
          resources: {}
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
      containers:
        - name: apisix
          image: 10.3.248.134:10080/k8s/apisix:3.9.0-debian-x86  # private registry
          # Mirror: registry.cn-hangzhou.aliyuncs.com/k8s_study_rfb/apisix:3.9.0-debian-x86
          ports:
            - name: http
              containerPort: 9080
              protocol: TCP
            - name: tls
              containerPort: 9443
              protocol: TCP
            - name: admin
              containerPort: 9180
              protocol: TCP
          resources: {}
          volumeMounts:
            # subPath mount overlays only config.yaml, leaving the rest of conf/ intact.
            - name: apisix-config
              mountPath: /usr/local/apisix/conf/config.yaml
              subPath: config.yaml
          readinessProbe:
            tcpSocket:
              port: 9080
            initialDelaySeconds: 10
            timeoutSeconds: 1
            periodSeconds: 10
            successThreshold: 1
            failureThreshold: 6
          lifecycle:
            preStop:
              exec:
                command:
                  - /bin/sh
                  - '-c'
                  - sleep 30
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      securityContext: {}
      schedulerName: default-scheduler
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 25%
      maxSurge: 25%
  revisionHistoryLimit: 10
  progressDeadlineSeconds: 600
---
# APISIX configuration. NOTE: the embedded file uses a literal block (|-), not a
# folded block (>-): folding would join lines and corrupt the embedded YAML.
kind: ConfigMap
apiVersion: v1
metadata:
  name: apisix
  namespace: gv-public
data:
  config.yaml: |-
    apisix:
      node_listen: 9080          # APISIX listening port
      enable_ipv6: false
      enable_control: true
      control:
        ip: "0.0.0.0"
        port: 9092
    deployment:
      admin:
        allow_admin:             # https://nginx.org/en/docs/http/ngx_http_access_module.html#allow
          - 0.0.0.0/0            # restrict in production; 0.0.0.0/0 is for testing only
        admin_key:
          # WARNING: these are the well-known default keys — rotate them in production.
          - name: "admin"
            key: edd1c9f034335f136f87ad84b625c8f1
            role: admin          # admin: manage all configuration data
          - name: "viewer"
            key: 4054f7cf07e344346cd3f287985e76a2
            role: viewer
      # In APISIX 3.x the etcd data-store settings live under `deployment.etcd`.
      etcd:
        host:                    # multiple hosts of the same etcd cluster may be listed
          - "http://apisix-etcd.gv-public.svc.cluster.local:2379"
        prefix: "/apisix"        # apisix configurations prefix
        timeout: 30              # 30 seconds
    plugin_attr:
      prometheus:
        export_addr:
          ip: "0.0.0.0"
          port: 9091
---
# In-cluster access to the APISIX Admin API (port 9180).
kind: Service
apiVersion: v1
metadata:
  name: apisix-admin
  namespace: gv-public
  labels:
    app.kubernetes.io/instance: apisix
    app.kubernetes.io/name: apisix
    app.kubernetes.io/version: "3.9.0"  # kept in sync with the apisix image tag (was 2.10.0)
spec:
  type: ClusterIP
  ports:
    - name: apisix-admin
      protocol: TCP
      port: 9180
      targetPort: 9180
  selector:
    app.kubernetes.io/instance: apisix
    app.kubernetes.io/name: apisix
---
# NodePort Service exposing the gateway's HTTP listener (9080) outside the cluster.
kind: Service
apiVersion: v1
metadata:
  name: apisix-gateway
  namespace: gv-public
  labels:
    app.kubernetes.io/instance: apisix
    app.kubernetes.io/name: apisix
    app.kubernetes.io/version: "3.9.0"  # kept in sync with the apisix image tag (was 2.10.0)
spec:
  type: NodePort
  ports:
    - name: apisix-gateway
      protocol: TCP
      port: 80
      targetPort: 9080
      nodePort: 31684  # must fall in the cluster's NodePort range (default 30000-32767)
  selector:
    app.kubernetes.io/instance: apisix
    app.kubernetes.io/name: apisix
  sessionAffinity: None
  externalTrafficPolicy: Cluster
创建apisix并查看运行状态
# kubectl apply -f apisix.yaml
# kubectl get pod -n gv-public
NAME READY STATUS RESTARTS AGE
apisix-7848cfb99f-ssfj9 1/1 Running 0 22m
apisix-7848cfb99f-vdxgf 1/1 Running 0 22m
apisix-etcd-0 1/1 Running 0 35m
apisix-etcd-1 1/1 Running 0 35m
apisix-etcd-2 1/1 Running 0 35m
四、安装apisix-dashboard
apisix-dashboard.yaml文件
# APISIX dashboard UI, configured via the `apisix-dashboard` ConfigMap.
kind: Deployment
apiVersion: apps/v1
metadata:
  name: apisix-dashboard
  namespace: gv-public
  labels:
    app.kubernetes.io/instance: apisix-dashboard
    app.kubernetes.io/name: apisix-dashboard
    app.kubernetes.io/version: "3.0.0"  # kept in sync with the image tag below (was 2.9.0)
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/instance: apisix-dashboard
      app.kubernetes.io/name: apisix-dashboard
  template:
    metadata:
      labels:
        app.kubernetes.io/instance: apisix-dashboard
        app.kubernetes.io/name: apisix-dashboard
    spec:
      volumes:
        - name: apisix-dashboard-config
          configMap:
            name: apisix-dashboard
            defaultMode: 420
      containers:
        - name: apisix-dashboard
          image: apache/apisix-dashboard:3.0.0-alpine
          # Mirror: registry.cn-hangzhou.aliyuncs.com/k8s_study_rfb/apisix-dashboard:3.0.0-alpine-x86
          ports:
            - name: http
              containerPort: 9000
              protocol: TCP
          resources: {}
          volumeMounts:
            - name: apisix-dashboard-config
              mountPath: /usr/local/apisix-dashboard/conf/conf.yaml
              subPath: conf.yaml
          livenessProbe:
            httpGet:
              path: /ping
              port: http
              scheme: HTTP
            timeoutSeconds: 1
            periodSeconds: 10
            successThreshold: 1
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /ping
              port: http
              scheme: HTTP
            timeoutSeconds: 1
            periodSeconds: 10
            successThreshold: 1
            failureThreshold: 3
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
          securityContext: {}
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      # Deprecated duplicate field `serviceAccount` removed; serviceAccountName is authoritative.
      serviceAccountName: apisix-dashboard
      securityContext: {}
      schedulerName: default-scheduler
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 25%
      maxSurge: 25%
  revisionHistoryLimit: 10
  progressDeadlineSeconds: 600
---
# NodePort Service for the dashboard UI; the nodePort is auto-assigned by the cluster.
kind: Service
apiVersion: v1
metadata:
  name: apisix-dashboard
  namespace: gv-public
  labels:
    app.kubernetes.io/instance: apisix-dashboard
    app.kubernetes.io/name: apisix-dashboard
    app.kubernetes.io/version: "3.0.0"  # kept in sync with the dashboard image tag (was 2.9.0)
spec:
  type: NodePort
  ports:
    - name: http
      protocol: TCP
      port: 9000
      targetPort: 9000
  selector:
    app.kubernetes.io/instance: apisix-dashboard
    app.kubernetes.io/name: apisix-dashboard
---
# Dashboard configuration (conf.yaml), mounted via subPath by the Deployment above it in the guide.
kind: ConfigMap
apiVersion: v1
metadata:
  name: apisix-dashboard
  namespace: gv-public
  labels:
    app.kubernetes.io/instance: apisix-dashboard
    app.kubernetes.io/name: apisix-dashboard
    app.kubernetes.io/version: "3.0.0"  # kept in sync with the dashboard image tag (was 2.9.0)
data:
  conf.yaml: |-
    conf:
      listen:
        host: 0.0.0.0
        port: 9000
      etcd:
        endpoints:
          - http://apisix-etcd.gv-public.svc.cluster.local:2379
      log:
        error_log:
          level: warn
          file_path: /dev/stderr
        access_log:
          file_path: /dev/stdout
    authentication:
      # Key fixed from the misspelling "secert": the dashboard reads `secret` as the
      # JWT signing key, so a misspelled key is silently ignored. Use a strong random
      # value in production (the value itself is arbitrary).
      secret: secert
      expire_time: 3600
      users:
        # WARNING: default credentials — change before exposing the dashboard.
        - username: admin
          password: admin
---
# ServiceAccount referenced by the apisix-dashboard Deployment.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: apisix-dashboard
  namespace: gv-public
创建并查看状态
# kubectl apply -f apisix-dashboard.yaml
deployment.apps/apisix-dashboard created
service/apisix-dashboard created
configmap/apisix-dashboard created
serviceaccount/apisix-dashboard created
# kubectl get pod -n gv-public
NAME READY STATUS RESTARTS AGE
apisix-7848cfb99f-ssfj9 1/1 Running 0 28m
apisix-7848cfb99f-vdxgf 1/1 Running 0 28m
apisix-dashboard-7ff898f9cb-rtlgb 1/1 Running 0 16s
apisix-etcd-0 1/1 Running 0 41m
apisix-etcd-1 1/1 Running 0 41m
apisix-etcd-2 1/1 Running 0 41m
五、访问并测试apisix
访问apisix-dashboard控制台可以通过刚才创建的NodePort类型的Service的端口进行访问,如下:
# kubectl get svc apisix-dashboard -n gv-public
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
apisix-dashboard NodePort 192.168.243.56 <none> 9000:32562/TCP 3m23s
可以通过k8s集群的任意一个节点IP加上32562端口进行访问
创建路由测试:可以通过dashboard控制台创建一个测试路由,然后使用刚才NodePort类型的Service端口31684进行访问
# kubectl get svc apisix-gateway -n gv-public
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
apisix-gateway NodePort 192.168.27.183 <none> 80:31684/TCP 52m