注:一、二、三步为本公众号“K8S--超详细搭建”内容,请翻阅!
第四步:flannel安装
--------------------------------在tp8-31 上安装配置flannel----------------------------------------------------------------------------------
[root@tp8-31 ~]# cd /opt/src/
[root@tp8-31 src]# wget https://github.com/coreos/flannel/releases/download/v0.12.0/flannel-v0.12.0-linux-amd64.tar.gz
[root@tp8-31 src]# mkdir /opt/flannel-v0.12.0
[root@tp8-31 src]# tar xf flannel-v0.12.0-linux-amd64.tar.gz -C /opt/flannel-v0.12.0/
[root@tp8-31 src]# ln -s /opt/flannel-v0.12.0/ /opt/flannel
[root@tp8-31 src]# cd ..
[root@tp8-31 opt]# cd flannel
[root@tp8-31 flannel]# mkdir cert
[root@tp8-31 flannel]# cd cert/
[root@tp8-31 cert]# scp tp8-100:/opt/certs/ca.pem .
[root@tp8-31 cert]# scp tp8-100:/opt/certs/client.pem .
[root@tp8-31 cert]# scp tp8-100:/opt/certs/client-key.pem .
[root@tp8-31 cert]# cd ..
[root@tp8-31 flannel]# vi subnet.env
FLANNEL_NETWORK=172.7.0.0/16
FLANNEL_SUBNET=172.7.31.1/24
FLANNEL_MTU=1500
FLANNEL_IPMASQ=false
[root@tp8-31 flannel]# vi flanneld.sh
#!/bin/sh
./flanneld \
--public-ip=10.5.8.31 \
--etcd-endpoints=https://10.5.8.12:2379,https://10.5.8.21:2379,https://10.5.8.22:2379 \
--etcd-keyfile=./cert/client-key.pem \
--etcd-certfile=./cert/client.pem \
--etcd-cafile=./cert/ca.pem \
--iface=ens33 \
--subnet-file=./subnet.env \
--healthz-port=2401
[root@tp8-31 flannel]# chmod +x flanneld.sh
[root@tp8-31 flannel]# mkdir -p /data/logs/flanneld
[root@tp8-31 flannel]# cd /opt/etcd
[root@tp8-31 etcd]# ./etcdctl member list
[root@tp8-31 etcd]# ./etcdctl set /coreos.com/network/config '{"Network": "172.7.0.0/16", "Backend": {"Type": "host-gw"}}'
[root@tp8-31 etcd]# ./etcdctl get /coreos.com/network/config
[root@tp8-31 etcd]# vi /etc/supervisord.d/flannel.ini
[program:flanneld-8-31]
command=/opt/flannel/flanneld.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/flannel ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; restart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/flanneld/flanneld.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
[root@tp8-31 etcd]# supervisorctl update
[root@tp8-31 etcd]# supervisorctl status
[root@tp8-31 etcd]# yum install iptables-services -y
[root@tp8-31 etcd]# systemctl start iptables
[root@tp8-31 etcd]# systemctl enable iptables
---------------------------------iptables优化SNAT规则------------------------------------------------------------------------------------------
[root@tp8-31 etcd]# iptables-save |grep -i postrouting
[root@tp8-31 etcd]# iptables -t nat -D POSTROUTING -s 172.7.31.0/24 ! -o docker0 -j MASQUERADE
[root@tp8-31 etcd]# iptables -t nat -I POSTROUTING -s 172.7.31.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE
[root@tp8-31 etcd]# iptables-save |grep -i postrouting //效果如下列内容所示,注意新插入的 "-A POSTROUTING -s 172.7.31.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE" 这条规则
:POSTROUTING ACCEPT [8261518:1050335345]
:POSTROUTING ACCEPT [6:366]
:KUBE-POSTROUTING - [0:0]
-A POSTROUTING -s 172.7.31.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE
-A POSTROUTING -m comment --comment "kubernetes postrouting rules" -j KUBE-POSTROUTING
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -m mark --mark 0x4000/0x4000 -j MASQUERADE
-A KUBE-POSTROUTING -m comment --comment "Kubernetes endpoints dst ip:port, source ip for solving hairpin purpose" -m set --match-set KUBE-LOOP-BACK dst,dst,src -j MASQUERADE
[root@tp8-31 etcd]# iptables-save > /etc/sysconfig/iptables
[root@tp8-31 etcd]# iptables-save |grep -i reject
[root@tp8-31 etcd]# iptables -t filter -D INPUT -j REJECT --reject-with icmp-host-prohibited
[root@tp8-31 etcd]# iptables -t filter -D FORWARD -j REJECT --reject-with icmp-host-prohibited
[root@tp8-31 etcd]# iptables-save > /etc/sysconfig/iptables
--------------------------------在tp8-32 上安装配置flannel----------------------------------------------------------------------------------
[root@tp8-32 ~]# cd /opt/src/
[root@tp8-32 src]# wget https://github.com/coreos/flannel/releases/download/v0.12.0/flannel-v0.12.0-linux-amd64.tar.gz
[root@tp8-32 src]# mkdir /opt/flannel-v0.12.0
[root@tp8-32 src]# tar xvf flannel-v0.12.0-linux-amd64.tar.gz -C /opt/flannel-v0.12.0/
[root@tp8-32 src]# ln -s /opt/flannel-v0.12.0/ /opt/flannel
[root@tp8-32 src]# cd ../flannel
[root@tp8-32 flannel]# mkdir cert
[root@tp8-32 flannel]# cd cert/
[root@tp8-32 cert]# scp tp8-100:/opt/certs/ca.pem .
[root@tp8-32 cert]# scp tp8-100:/opt/certs/client.pem .
[root@tp8-32 cert]# scp tp8-100:/opt/certs/client-key.pem .
[root@tp8-32 cert]# cd ..
[root@tp8-32 flannel]# vi subnet.env
FLANNEL_NETWORK=172.7.0.0/16
FLANNEL_SUBNET=172.7.32.1/24
FLANNEL_MTU=1500
FLANNEL_IPMASQ=false
[root@tp8-32 flannel]# vi flanneld.sh
#!/bin/sh
./flanneld \
--public-ip=10.5.8.32 \
--etcd-endpoints=https://10.5.8.12:2379,https://10.5.8.21:2379,https://10.5.8.22:2379 \
--etcd-keyfile=./cert/client-key.pem \
--etcd-certfile=./cert/client.pem \
--etcd-cafile=./cert/ca.pem \
--iface=ens33 \
--subnet-file=./subnet.env \
--healthz-port=2401
[root@tp8-32 flannel]# chmod +x flanneld.sh
[root@tp8-32 flannel]# mkdir -p /data/logs/flanneld
[root@tp8-32 flannel]# vi /etc/supervisord.d/flannel.ini
[program:flanneld-8-32]
command=/opt/flannel/flanneld.sh ; the program (relative uses PATH, can take args)
numprocs=1 ; number of processes copies to start (def 1)
directory=/opt/flannel ; directory to cwd to before exec (def no cwd)
autostart=true ; start at supervisord start (default: true)
autorestart=true ; restart at unexpected quit (default: true)
startsecs=30 ; number of secs prog must stay running (def. 1)
startretries=3 ; max # of serial start failures (default 3)
exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
stopsignal=QUIT ; signal used to kill process (default TERM)
stopwaitsecs=10 ; max num secs to wait b4 SIGKILL (default 10)
user=root ; setuid to this UNIX account to run the program
redirect_stderr=true ; redirect proc stderr to stdout (default false)
stdout_logfile=/data/logs/flanneld/flanneld.stdout.log ; stdout log path, NONE for none; default AUTO
stdout_logfile_maxbytes=64MB ; max # logfile bytes b4 rotation (default 50MB)
stdout_logfile_backups=4 ; # of stdout logfile backups (default 10)
stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0)
stdout_events_enabled=false ; emit events on stdout writes (default false)
[root@tp8-32 flannel]# supervisorctl update
[root@tp8-32 flannel]# supervisorctl status
[root@tp8-32 ~]# yum install iptables-services -y
[root@tp8-32 ~]# systemctl start iptables
[root@tp8-32 ~]# systemctl enable iptables
[root@tp8-32 ~]# iptables-save |grep -i postrouting
[root@tp8-32 ~]# iptables -t nat -D POSTROUTING -s 172.7.32.0/24 ! -o docker0 -j MASQUERADE //与tp8-31相同,优化SNAT规则:先删除原规则
[root@tp8-32 ~]# iptables -t nat -I POSTROUTING -s 172.7.32.0/24 ! -d 172.7.0.0/16 ! -o docker0 -j MASQUERADE //再插入排除Pod网段172.7.0.0/16的规则,避免跨节点Pod流量被SNAT
[root@tp8-32 ~]# iptables -t filter -D INPUT -j REJECT --reject-with icmp-host-prohibited
[root@tp8-32 ~]# iptables -t filter -D FORWARD -j REJECT --reject-with icmp-host-prohibited
[root@tp8-32 ~]# iptables-save > /etc/sysconfig/iptables
------------------------辅助命令,非需要不必执行以下命令------------------------------------------------------------------------------------
[root@tp8-31 etcd]# route del -net 172.7.31.0/24 gw 10.5.8.31 //删除路由
第五步:coredns插件部署
---------------------------tp8-100 上执行如下命令---------------------------------------------------
[root@tp8-100 ~]# vi /etc/nginx/conf.d/k8s-yaml.monelife.com.conf
server {
listen 80;
server_name k8s-yaml.monelife.com;
location / {
autoindex on;
default_type text/plain;
root /data/k8s-yaml;
}
}
[root@tp8-100 ~]# mkdir -p /data/k8s-yaml/coredns
[root@tp8-100 ~]# nginx -t
[root@tp8-100 ~]# nginx -s reload
---------------------------tp8-11 添加一行DNS 解析---------------------------------------------------
[root@tp8-11 ~]# vi /var/named/monelife.com.zone
[root@tp8-11 ~]# systemctl restart named
---------------------------tp8-100 上传coredns镜像到私有仓库---------------------------------------------------
[root@tp8-100 ~]# cd /data/k8s-yaml/
[root@tp8-100 k8s-yaml]# dig -t A k8s-yaml.monelife.com @10.5.8.11 +short
[root@tp8-100 k8s-yaml]# docker pull coredns/coredns:1.6.1
[root@tp8-100 k8s-yaml]# docker images |grep 1.6.1
[root@tp8-100 k8s-yaml]# docker tag c0f6e815079e harbor.monelife.com/public/coredns:v1.6.1
[root@tp8-100 k8s-yaml]# docker push harbor.monelife.com/public/coredns:v1.6.1
---------------------------tp8-100 创建源文件----------------------------------------------------------------------
[root@tp8-100 k8s-yaml]# cd coredns/
[root@tp8-100 coredns]# vi rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: Reconcile
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
addonmanager.kubernetes.io/mode: EnsureExists
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
[root@tp8-100 coredns]# vi cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
log
health
ready
kubernetes cluster.local 192.168.0.0/16
forward . 10.5.8.11
cache 30
loop
reload
loadbalance
}
[root@tp8-100 coredns]# vi dp.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: coredns
kubernetes.io/name: "CoreDNS"
spec:
replicas: 1
selector:
matchLabels:
k8s-app: coredns
template:
metadata:
labels:
k8s-app: coredns
spec:
priorityClassName: system-cluster-critical
serviceAccountName: coredns
containers:
- name: coredns
image: harbor.monelife.com/public/coredns:v1.6.1
args:
- -conf
- /etc/coredns/Corefile
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
[root@tp8-100 coredns]# vi svc.yaml
apiVersion: v1
kind: Service
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: coredns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: coredns
clusterIP: 192.168.0.2
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
- name: metrics
port: 9153
protocol: TCP
---------------------------------------选择tp8-21 或 tp8-22 上执行如下命令--------------------------
[root@tp8-21 ~]# kubectl apply -f http://k8s-yaml.monelife.com/coredns/rbac.yaml
[root@tp8-21 ~]# kubectl apply -f http://k8s-yaml.monelife.com/coredns/cm.yaml
[root@tp8-21 ~]# kubectl apply -f http://k8s-yaml.monelife.com/coredns/dp.yaml
[root@tp8-21 ~]# kubectl apply -f http://k8s-yaml.monelife.com/coredns/svc.yaml
[root@tp8-21 ~]# kubectl get all -n kube-system -o wide
[root@tp8-21 ~]# dig -t A www.baidu.com @192.168.0.2 +short
第六步:ingress之traefik部署
----------------------------------------以下命令在tp8-100上执行----------------------------------------------
[root@tp8-100 ~]# docker pull traefik:v2.2
[root@tp8-100 ~]# docker tag 3d0acfd4b500 harbor.monelife.com/public/traefik:v2.2
[root@tp8-100 ~]# docker push harbor.monelife.com/public/traefik:v2.2
[root@tp8-100 ~]# mkdir -p /data/k8s-yaml/traefik && cd /data/k8s-yaml/traefik
[root@tp8-100 traefik]# vi rbac.yaml
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses/status
verbs:
- update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system
[root@tp8-100 traefik]# vi ds.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: traefik-ingress
namespace: kube-system
labels:
app: traefik-ingress
spec:
replicas: 2
selector:
matchLabels:
app: traefik-ingress
template:
metadata:
labels:
app: traefik-ingress
spec:
serviceAccountName: traefik-ingress-controller
containers:
- image: harbor.monelife.com/public/traefik:v2.2
name: traefik-ingress
ports:
- name: web
containerPort: 80
hostPort: 81
- name: admin
containerPort: 8080
args:
- --api
- --api.insecure
- --log.level=INFO
- --providers.kubernetesingress
- --entrypoints.web.address=:80
[root@tp8-100 traefik]# vi svc.yaml
kind: Service
apiVersion: v1
metadata:
name: traefik-ingress-service
namespace: kube-system
spec:
type: LoadBalancer
selector:
app: traefik-ingress
ports:
- protocol: TCP
port: 80
name: web
targetPort: 80
- protocol: TCP
port: 8080
name: admin
targetPort: 8080
[root@tp8-100 traefik]# vi ingress.yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: traefik-web-ui
namespace: kube-system
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: web
spec:
rules:
- host: traefik.monelife.com
http:
paths:
- path: /
backend:
serviceName: traefik-ingress-service
servicePort: 8080
---------------------------------任意选择运算node节点,此例以tp8-21为例执行如下命令----------------------------------------------------
[root@tp8-21 ~]# kubectl create -f http://k8s-yaml.monelife.com/traefik/rbac.yaml
[root@tp8-21 ~]# kubectl create -f http://k8s-yaml.monelife.com/traefik/ds.yaml
[root@tp8-21 ~]# kubectl create -f http://k8s-yaml.monelife.com/traefik/svc.yaml
[root@tp8-21 ~]# kubectl create -f http://k8s-yaml.monelife.com/traefik/ingress.yaml
-------------------------------------------tp8-31 node节点上执行如下命令-------------------------------------------------------------
[root@tp8-31 ~]# systemctl restart docker
-------------------------------------------tp8-32 node节点上执行如下命令-------------------------------------------------------------
[root@tp8-32 ~]# systemctl restart docker
---------------------------------tp8-21为例执行如下命令----------------------------------------------------
[root@tp8-21 ~]# kubectl get pods -n kube-system //状态要变为running即为成功
---------------------------------tp8-11上配置nginx----------------------------------------------------
[root@tp8-11 ~]# vi /etc/nginx/conf.d/monelife.com.conf
upstream default_backend_traefik {
server 10.5.8.31:81 max_fails=3 fail_timeout=10s;
server 10.5.8.32:81 max_fails=3 fail_timeout=10s;
}
server {
server_name *.monelife.com;
location / {
proxy_pass http://default_backend_traefik;
proxy_set_header Host $http_host;
proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for;
}
}
[root@tp8-11 ~]# vi /var/named/monelife.com.zone
[root@tp8-11 ~]# systemctl restart named
[root@tp8-11 ~]# nginx -s reload
---------------------------------tp8-12上配置nginx----------------------------------------------------
[root@tp8-12 ~]# scp tp8-11:/etc/nginx/conf.d/monelife.com.conf /etc/nginx/conf.d/monelife.com.conf
[root@tp8-12 ~]# nginx -s reload
--------------------------------浏览器中访问以下地址进行验证----------------------------------------------------
http://traefik.monelife.com/
-------------------以下命令不用执行-------------------------------------------------------
[root@tp8-21 ~]# kubectl delete -f http://k8s-yaml.monelife.com/traefik/rbac.yaml
[root@tp8-21 ~]# kubectl delete -f http://k8s-yaml.monelife.com/traefik/ds.yaml
[root@tp8-21 ~]# kubectl delete -f http://k8s-yaml.monelife.com/traefik/svc.yaml
[root@tp8-21 ~]# kubectl delete -f http://k8s-yaml.monelife.com/traefik/ingress.yaml