Setting up Kubernetes with kubeadm (including a high-availability solution)
apt install ansible
ssh-keygen
ssh-copy-id root@192.168.98.127
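All of the ansible commands below assume an inventory with a host group (every machine in the cluster) and a node group (the machines that edited files get pushed to). A minimal sketch of /etc/ansible/hosts; only 192.168.98.127 appears above, the remaining entries are placeholders:
[host]
192.168.98.127
# ...one line per cluster machine
[node]
192.168.98.127
# ...the machines that receive the copied /etc files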
ansible host -m command -a 'sudo swapoff -a'
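swapoff -a only lasts until the next reboot; to keep swap disabled permanently, also comment out the swap entry in /etc/fstab. A hedged one-liner (verify the fstab layout on your hosts first):
ansible host -m shell -a "sudo sed -i '/\sswap\s/ s/^/#/' /etc/fstab"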
ansible host -m command -a 'sudo ufw allow <port>'   # open the required ports (or disable ufw entirely on lab machines)
ansible host -m command -a 'sudo timedatectl set-timezone Asia/Shanghai'
ansible host -m command -a 'sudo timedatectl'
ansible host -m command -a 'sudo systemctl mask sleep.target suspend.target hibernate.target hybrid-sleep.target'
ansible host -m command -a 'sudo modprobe br_netfilter'
ansible host -m shell -a 'lsmod | grep br_netfilter'   # the pipe requires the shell module, not command
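modprobe also does not persist across reboots; a small sketch that loads br_netfilter at boot via systemd's modules-load mechanism:
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF
ansible node -m copy -a "src=/etc/modules-load.d/k8s.conf dest=/etc/modules-load.d/k8s.conf backup=yes mode=644"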
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
ansible node -m copy -a "src=/etc/sysctl.d/k8s.conf dest=/etc/sysctl.d/k8s.conf backup=yes mode=744"
ansible host -m command -a 'sudo sysctl --system'
sudo vi /etc/sysctl.d/10-network-security.conf
Change the following two parameters in this file from 2 to 1:
net.ipv4.conf.default.rp_filter=1
net.ipv4.conf.all.rp_filter=1
ansible node -m copy -a "src=/etc/sysctl.d/10-network-security.conf dest=/etc/sysctl.d/10-network-security.conf backup=yes mode=744"
Apply the configuration:
ansible host -m command -a 'sudo sysctl --system'
ansible host -m shell -a 'sudo apt update && sudo apt install -y docker.io'
Write /etc/docker/daemon.json so Docker uses the systemd cgroup driver, matching the kubelet:
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
ansible node -m copy -a "src=/etc/docker/daemon.json dest=/etc/docker/daemon.json backup=yes mode=744"
ansible host -m shell -a 'sudo systemctl daemon-reload && sudo systemctl restart docker'
sudo docker info | grep -i cgroup
ansible host -m shell -a 'sudo apt-get update && sudo apt-get install -y ca-certificates curl software-properties-common apt-transport-https'
curl -s https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | sudo apt-key add -
sudo tee /etc/apt/sources.list.d/kubernetes.list <<EOF
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
ansible node -m copy -a "src=/etc/apt/sources.list.d/kubernetes.list dest=/etc/apt/sources.list.d/kubernetes.list backup=yes mode=744"
ansible host -m command -a 'sudo apt update'
ansible host -m command -a 'sudo apt install kubeadm=1.17.4-00 kubelet=1.17.4-00 kubectl=1.17.4-00 -y'
ansible host -m command -a 'sudo apt-mark hold kubelet kubeadm kubectl'
ansible host -m command -a 'sudo systemctl enable kubelet'
kubeadm config images list
# pull the required images from the Aliyun mirror and retag them as k8s.gcr.io;
# the images array is filled from the list printed by kubeadm above
images=($(kubeadm config images list 2>/dev/null | awk -F'/' '{print $NF}'))
for imageName in ${images[@]}; do
    docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
    docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName k8s.gcr.io/$imageName
    docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
done
Or pull each image explicitly:
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.17.17
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.17.17 k8s.gcr.io/kube-proxy:v1.17.17
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.17.17
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.17.17
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.17.17 k8s.gcr.io/kube-apiserver:v1.17.17
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.17.17
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.17.17
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.17.17 k8s.gcr.io/kube-controller-manager:v1.17.17
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.17.17
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.17.17
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.17.17 k8s.gcr.io/kube-scheduler:v1.17.17
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.17.17
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1 k8s.gcr.io/pause:3.1
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.3-0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.3-0 k8s.gcr.io/etcd:3.4.3-0
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.3-0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.5
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.5 k8s.gcr.io/coredns:1.6.5
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.5
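These pulls must run on every machine; one hedged way to fan them out is to save the loop above as pull-images.sh (hypothetical filename) and run it everywhere with ansible's script module:
ansible host -m script -a ./pull-images.sh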
Edit /etc/cloud/cloud.cfg so cloud-init stops rewriting the hostname on reboot:
sudo vim /etc/cloud/cloud.cfg
preserve_hostname: true
ansible node -m copy -a "src=/etc/cloud/cloud.cfg dest=/etc/cloud/cloud.cfg backup=yes mode=744"
sudo vim /etc/hosts
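A sketch of the entries to add; the hostnames and IPs below are the ones used in the HA section of this document, adjust them to your environment:
192.168.166.128 k8s-master1
192.168.166.129 k8s-master2
192.168.166.100 api.k8s.local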
ansible node -m copy -a "src=/etc/hosts dest=/etc/hosts backup=yes mode=744"
kubeadm init --apiserver-advertise-address=192.168.33.10 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/12 \
    --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers --ignore-preflight-errors=all
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml
kubectl get nodes
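Nodes only become Ready once the flannel pods are up; a quick hedged check (pod names vary by flannel version):
kubectl -n kube-system get pods -o wide   # the kube-flannel-ds-* pods should reach Running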
kubeadm token create --print-join-command
kubeadm join 192.168.33.10:6443 --token sqdfvx.89ugmx0kid7wtdt1 --discovery-token-ca-cert-hash sha256:57cce0a7a8d0c4e003596ce320129944b79f58ae23ad7e6a76bd72ab5557839b
High-availability solution
First dump the current cluster configuration, then add the master hostnames and the VIP to apiServer.certSANs:
kubectl -n kube-system get configmap kubeadm-config -o jsonpath='{.data.ClusterConfiguration}' > kubeadm.yaml
apiServer:
  certSANs:
  - api.k8s.local
  - k8s-master1
  - k8s-master2
  - 192.168.166.128
  - 192.168.166.129
  - 192.168.166.100
  extraArgs:
    authorization-mode: Node,RBAC
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/k8sxio
kind: ClusterConfiguration
kubernetesVersion: v1.17.11
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
scheduler: {}
mv /etc/kubernetes/pki/apiserver.{crt,key} ~
kubeadm init phase certs apiserver --config kubeadm.yaml
W0902 10:05:28.006627 832 validation.go:28] Cannot validate kubelet config - no validator is available
W0902 10:05:28.006754 832 validation.go:28] Cannot validate kube-proxy config - no validator is available
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [ydzs-master kubernetes kubernetes.default kubernetes.default.svc
kubernetes.default.svc.cluster.local api.k8s.local k8s-master1 k8s-master2] and IPs [10.96.0.1 192.168.166.128 192.168.166.129 192.168.166.100]
docker ps | grep kube-apiserver | grep -v pause
docker kill $(docker ps | grep kube-apiserver | grep -v pause | cut -d' ' -f1)   # restart the apiserver so it picks up the new certificate
openssl x509 -in /etc/kubernetes/pki/apiserver.crt -text
kubeadm config upload from-file --config kubeadm.yaml
kubectl -n kube-system edit configmap kubeadm-config
kubectl -n kube-system get configmap kubeadm-config -o yaml
On the load-balancer nodes, install nginx and keepalived:
apt install nginx keepalived -y
cat > /etc/nginx/nginx.conf << "EOF"
load_module /usr/lib/nginx/modules/ngx_stream_module.so;   # load the stream module
user nginx;   # the nginx user must exist; create it first if necessary
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;

include /usr/share/nginx/modules/*.conf;

events {
    worker_connections 1024;
}

# Layer-4 load balancing in front of the two Master apiservers;
# this needs the stream module, loaded by the load_module line above
stream {
    log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
    access_log /var/log/nginx/k8s-access.log main;

    upstream k8s-apiserver {
        server 192.168.166.128:6443;   # Master1 APISERVER IP:PORT
        server 192.168.166.129:6443;   # Master2 APISERVER IP:PORT
    }

    server {
        listen 16443;   # nginx shares the master nodes, so it cannot listen on 6443 or it would clash with the apiserver
        proxy_pass k8s-apiserver;
    }
}

http {
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log /var/log/nginx/access.log main;

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    server {
        listen 80 default_server;
        server_name _;
        location / {
        }
    }
}
EOF
cat > /etc/keepalived/keepalived.conf << EOF
global_defs {
    notification_email {
        acassen@firewall.loc
        failover@firewall.loc
        sysadmin@firewall.loc
    }
    notification_email_from Alexandre.Cassen@firewall.loc
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id NGINX_MASTER
}

vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx.sh"
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33            # change to the actual NIC name
    virtual_router_id 51       # VRRP router ID; unique per VRRP instance
    priority 100               # priority; set 90 on the backup server
    advert_int 1               # VRRP advertisement interval (default 1s)
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    # virtual IP
    virtual_ipaddress {
        192.168.166.100/24     # VIP address
    }
    track_script {
        check_nginx
    }
}
EOF
cat > /etc/keepalived/check_nginx.sh << "EOF"
#!/bin/bash
# exit non-zero when nothing is listening on 16443, so keepalived fails over
count=$(ss -antp | grep 16443 | egrep -cv "grep|$$")
if [ "$count" -eq 0 ]; then
    exit 1
else
    exit 0
fi
EOF
chmod +x /etc/keepalived/check_nginx.sh
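A quick sanity check, not in the original: run the script by hand while nginx is up and inspect the exit code:
bash /etc/keepalived/check_nginx.sh; echo $?   # expect 0 once nginx is listening on 16443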
On the backup load-balancer node, the configuration differs only in router_id, state and priority:
cat > /etc/keepalived/keepalived.conf << EOF
global_defs {
    notification_email {
        acassen@firewall.loc
        failover@firewall.loc
        sysadmin@firewall.loc
    }
    notification_email_from Alexandre.Cassen@firewall.loc
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id NGINX_BACKUP
}

vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx.sh"
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 51       # VRRP router ID; unique per VRRP instance
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.166.100/24     # VIP address
    }
    track_script {
        check_nginx
    }
}
EOF
The same health-check script goes on the backup node as well:
cat > /etc/keepalived/check_nginx.sh << "EOF"
#!/bin/bash
count=$(ss -antp | grep 16443 | egrep -cv "grep|$$")
if [ "$count" -eq 0 ]; then
    exit 1
else
    exit 0
fi
EOF
chmod +x /etc/keepalived/check_nginx.sh
systemctl daemon-reload
systemctl start nginx
systemctl start keepalived
systemctl enable nginx
systemctl enable keepalived
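Before relying on the VIP, it is worth confirming nginx really is listening on 16443 on both load balancers:
ss -lntp | grep 16443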
ip addr | grep ens33   # the VIP 192.168.166.100 should be bound on the MASTER load balancer
Now point the kubelet and the control-plane components at the VIP. On each master node, first the kubelet:
$ vi /etc/kubernetes/kubelet.conf
......
    server: https://192.168.166.100:16443
  name: kubernetes
......
$ systemctl restart kubelet
$ vi /etc/kubernetes/controller-manager.conf
......
    server: https://192.168.166.100:16443
  name: kubernetes
......
$ docker kill $(docker ps | grep kube-controller-manager | \
grep -v pause | cut -d' ' -f1)
$ vi /etc/kubernetes/scheduler.conf
......
    server: https://192.168.166.100:16443
  name: kubernetes
......
$ docker kill $(docker ps | grep kube-scheduler | grep -v pause | \
cut -d' ' -f1)
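The three edits above are the same substitution; a hedged one-liner that performs all of them at once (inspect the files before restarting anything):
sudo sed -i 's#server: https://.*#server: https://192.168.166.100:16443#' \
    /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf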
$ kubectl -n kube-system edit cm kube-proxy
......
    kubeconfig.conf: |-
      apiVersion: v1
      kind: Config
      clusters:
      - cluster:
          certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
          server: https://192.168.166.100:16443
        name: default
......
kubectl -n kube-system edit configmap kubeadm-config
Then add the controlPlaneEndpoint property to this configuration, pointing at the load balancer in front of the control plane:
apiVersion: v1
data:
  ClusterConfiguration: |
    apiServer:
      certSANs:
      - api.k8s.local
      - k8s-master1
      - k8s-master2
      - 192.168.166.128
      - 192.168.166.129
      - 192.168.166.100
      extraArgs:
        authorization-mode: Node,RBAC
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta2
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    controlPlaneEndpoint: 192.168.166.100:16443
    controllerManager: {}
......
$ kubectl -n kube-public edit cm cluster-info
......
    server: https://192.168.166.100:16443
  name: ""
......
$ kubectl cluster-info
Kubernetes master is running at https://192.168.166.100:16443
KubeDNS is running at https://192.168.166.100:16443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
KubeDNSUpstream is running at https://192.168.166.100:16443/api/v1/namespaces/kube-system/services/kube-dns-upstream:dns/proxy
Metrics-server is running at https://192.168.166.100:16443/api/v1/namespaces/kube-system/services/https:metrics-server:/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
echo "$(kubeadm token create --print-join-command) --control-plane --certificate-key $(kubeadm init phase upload-certs --upload-certs | tail -1)"
W0726 18:46:09.006414 37566 validation.go:28] Cannot validate kube-proxy config - no validator is available
W0726 18:46:09.006478 37566 validation.go:28] Cannot validate kubelet config - no validator is available
I0726 18:46:10.155711 37575 version.go:251] remote version is much newer: v1.24.3; falling back to: stable-1.17
W0726 18:46:11.758691 37575 validation.go:28] Cannot validate kube-proxy config - no validator is available
W0726 18:46:11.758835 37575 validation.go:28] Cannot validate kubelet config - no validator is available
kubeadm join 192.168.98.42:16443 --token e9g4op.dd1xe2utc7p9r41s --discovery-token-ca-cert-hash sha256:cf427e153ff8eb922428357026b6e1971c80250eef4d174889c398c64bf538b3 \
    --control-plane --certificate-key 2b46f6ddd910fc978a6bfec24b6e4cabcdf875065c5989316ebb00192fa46396   # run this join command on the master node being added
# inspect the etcd manifest
cat /etc/kubernetes/manifests/etcd.yaml
# check that the cluster is healthy:
kubectl get node
# finally, test access through the load balancer:
# from any node in the cluster, curl the Kubernetes version via the VIP:
curl -k https://192.168.98.42:16443/version
{
  "major": "1",
  "minor": "17",
  "gitVersion": "v1.17.4",
  "gitCommit": "8d8aa39598534325ad77120c120a22b3a990b5ea",
  "gitTreeState": "clean",
  "buildDate": "2020-03-12T20:55:23Z",
  "goVersion": "go1.13.8",
  "compiler": "gc",
  "platform": "linux/amd64"
}
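To see requests being spread across both apiservers in the log below, generate a little traffic against the VIP first (a quick sketch; substitute your own VIP):
for i in $(seq 1 5); do curl -sk https://192.168.98.42:16443/version > /dev/null; done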
tail /var/log/nginx/k8s-access.log -f
192.168.166.130 192.168.166.129:6443 - [13/Jun/2021:15:06:15 +0800] 200 423
192.168.166.130 192.168.166.128:6443 - [13/Jun/2021:15:06:15 +0800] 200 423
192.168.166.130 192.168.166.128:6443 - [13/Jun/2021:15:06:15 +0800] 200 423
192.168.166.130 192.168.166.129:6443 - [13/Jun/2021:15:06:15 +0800] 200 423
192.168.166.130 192.168.166.129:6443 - [13/Jun/2021:15:06:15 +0800] 200 423