Table of Contents
Disable SELinux and firewalld on all nodes, and add local name resolution
Configure the yum repo and install kubeadm and kubelet on all nodes
Environment preparation
Node (CentOS 7)   | IP                       |
master-01         | 192.168.1.10 (NAT NIC)   |
master-02         | 192.168.1.20 (NAT NIC)   |
node              | 192.168.1.30 (NAT NIC)   |
Disable SELinux and firewalld on all nodes, and add local name resolution
Required on all three nodes.
[root@master-01 ~]# vi /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.10 master-01
192.168.1.20 master-02
192.168.1.30 node
Copy the hosts file to the other nodes:
[root@master-01 ~]# scp /etc/hosts root@192.168.1.20:/etc/
[root@master-01 ~]# scp /etc/hosts root@192.168.1.30:/etc/
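A quick way to confirm the entries work is to ping each peer by hostname, e.g.:
[root@master-01 ~]# ping -c 1 master-02
[root@master-01 ~]# ping -c 1 node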
Disable SELinux (on all nodes):
[root@master-01 ~]# sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
[root@master-01 ~]# setenforce 0
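After these two commands, getenforce should report Permissive (and Disabled after a reboot):
[root@master-01 ~]# getenforce
Permissive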
Stop and disable the firewall:
[root@master-01 ~]# systemctl stop firewalld
[root@master-01 ~]# systemctl disable firewalld
Time synchronization on the master nodes
On master-01:
[root@master-01 ~]# yum install -y ntp ntpdate
[root@master-01 ~]# ntpdate ntp1.aliyun.com
[root@master-01 ~]# vi /etc/ntp.conf    //add the following line
server 192.168.1.10
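The steps above do not show ntpd being started; for master-02 to sync against master-01, the daemon must be running there (assuming the stock CentOS 7 ntp package):
[root@master-01 ~]# systemctl start ntpd && systemctl enable ntpd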
On master-02:
[root@master-02 ~]# yum -y install ntpdate
Sync against master-01:
[root@master-02 ~]# ntpdate master-01
25 Jun 09:44:58 ntpdate[68017]: adjust time server 192.168.1.10 offset -0.000083 sec
Raise limits on user resource usage
Only the two master nodes need this.
[root@master-01 ~]# echo "* soft nofile 65536" >> /etc/security/limits.conf
[root@master-01 ~]# echo "* hard nofile 65536" >> /etc/security/limits.conf
[root@master-01 ~]# echo "* soft nproc 65536" >> /etc/security/limits.conf
[root@master-01 ~]# echo "* hard nproc 65536" >> /etc/security/limits.conf
[root@master-01 ~]# echo "* soft memlock unlimited" >> /etc/security/limits.conf
[root@master-01 ~]# echo "* hard memlock unlimited" >> /etc/security/limits.conf
Install keepalived on the master nodes
[root@master-01 ~]# yum install -y keepalived
[root@master-01 ~]# vi /etc/keepalived/keepalived.conf    //delete the existing contents and add the following
! Configuration File for keepalived
global_defs {
    router_id LVS_k8s
}
vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3
    weight -2
    fall 10
    rise 2
}
vrrp_instance VI_1 {
    state MASTER
    interface eth0              #change to your own NIC
    virtual_router_id 51
    priority 50
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 35f18af7190d51c9f7f78f37300a0cbd
    }
    virtual_ipaddress {
        192.168.1.100           #the VIP
    }
    track_script {
        check_haproxy
    }
}
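Note that the check script uses killall, which is provided by the psmisc package and may be absent on a minimal CentOS 7 install; if so, install it on both masters:
[root@master-01 ~]# yum install -y psmisc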
Copy the finished config to master-02:
[root@master-01 ~]# scp /etc/keepalived/keepalived.conf root@192.168.1.20:/etc/keepalived/
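As copied, both machines would start as MASTER with priority 50. A common adjustment on master-02 is to switch it to BACKUP with a lower priority (40 here is just an example):
[root@master-02 ~]# sed -i -e 's/state MASTER/state BACKUP/' -e 's/priority 50/priority 40/' /etc/keepalived/keepalived.conf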
Start keepalived:
[root@master-01 ~]# systemctl enable keepalived && systemctl start keepalived
Check the status:
[root@master-01 ~]# systemctl status keepalived
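Whichever master currently holds the VIP should show 192.168.1.100 on its interface (eth0 per the config above):
[root@master-01 ~]# ip addr show eth0 | grep 192.168.1.100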
Install haproxy on the master nodes
[root@master-01 ~]# yum install -y haproxy
[root@master-01 ~]# vi /etc/haproxy/haproxy.cfg
The stock defaults block is shown below for context; the part actually being added is the listen k8s_haproxy section after it:
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option                  http-server-close
    option                  forwardfor except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

listen k8s_haproxy
    bind *:6666
    mode tcp
    server master-01 192.168.1.10:6443 check inter 2000 rise 2 fall 5
    server master-02 192.168.1.20:6443 check inter 2000 rise 2 fall 5
Copy to master-02:
[root@master-01 ~]# scp /etc/haproxy/haproxy.cfg root@192.168.1.20:/etc/haproxy/
[root@master-01 ~]# systemctl enable haproxy && systemctl start haproxy && systemctl status haproxy
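A quick check that haproxy is actually listening on the frontend port:
[root@master-01 ~]# ss -lntp | grep 6666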
Install docker on all nodes
[root@master-01 ~]# yum -y install docker
[root@master-01 ~]# systemctl start docker && systemctl enable docker
[root@master-01 ~]# vi /etc/docker/daemon.json    //use the Aliyun registry mirror (JSON itself allows no comments)
{
    "registry-mirrors": ["https://elcwges7.mirror.aliyuncs.com"]
}
[root@master-01 ~]# systemctl daemon-reload
[root@master-01 ~]# systemctl restart docker
Copy to the master-02 and node machines:
[root@master-01 ~]# scp /etc/docker/daemon.json root@192.168.1.20:/etc/docker
[root@master-01 ~]# scp /etc/docker/daemon.json root@192.168.1.30:/etc/docker
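The copied daemon.json only takes effect after a restart, so repeat the restart on the other two machines:
[root@master-02 ~]# systemctl daemon-reload && systemctl restart docker
[root@node ~]# systemctl daemon-reload && systemctl restart docker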
Enable IP forwarding and bridge filtering
[root@master-01 ~]# vi /etc/sysctl.conf
# sysctl settings are defined through files in
# /usr/lib/sysctl.d/, /run/sysctl.d/, and /etc/sysctl.d/.
#
# Vendors settings live in /usr/lib/sysctl.d/.
# To override a whole file, create a new file with the same name in
# /etc/sysctl.d/ and put new settings there. To override
# only specific settings, add a file with a lexically later
# name in /etc/sysctl.d/ and put new settings there.
#
# For more information, see sysctl.conf(5) and sysctl.d(5).
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
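The two net.bridge.* keys only exist while the br_netfilter kernel module is loaded; if sysctl -p complains that they are unknown, load the module first and persist it across reboots:
[root@master-01 ~]# modprobe br_netfilter
[root@master-01 ~]# echo br_netfilter > /etc/modules-load.d/br_netfilter.conf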
Apply the settings so forwarding takes effect:
[root@master-01 ~]# sysctl -p
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
Copy to master-02 and node:
[root@master-01 ~]# scp /etc/sysctl.conf root@192.168.1.20:/etc/
[root@master-01 ~]# scp /etc/sysctl.conf root@192.168.1.30:/etc/
Configure the yum repo and install kubeadm and kubelet on all nodes
[root@master-01 ~]# cat /etc/yum.repos.d/local.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
[root@master-01 ~]# yum clean all
[root@master-01 ~]# yum list
[root@master-01 ~]# scp /etc/yum.repos.d/local.repo root@192.168.1.20:/etc/yum.repos.d/
[root@master-01 ~]# scp /etc/yum.repos.d/local.repo root@192.168.1.30:/etc/yum.repos.d/
Install kubelet, kubeadm, and kubectl on all nodes:
[root@master-01 ~]# yum install -y kubelet-1.18.2-0 kubeadm-1.18.2-0 kubectl-1.18.2-0 --disableexcludes=kubernetes
[root@master-01 ~]# systemctl start kubelet ; systemctl enable kubelet
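Until kubeadm init (or join) runs, kubelet has no cluster configuration and will restart in a loop; that is expected and can be observed with:
[root@master-01 ~]# systemctl status kubelet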
Generate the config file on master-01
[root@master-01 ~]# kubeadm config print init-defaults > kubeadm-config.yaml
[root@master-01 ~]# vi kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.1.10        # this machine's IP
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: master-01                       # this machine's hostname, as in /etc/hosts
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: 192.168.1.100:6666    # the VIP address
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers    # registry docker pulls from
kind: ClusterConfiguration
kubernetesVersion: v1.18.2              # version to pull
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16
scheduler: {}
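Optionally, the required images can be pre-pulled from the Aliyun repository before initializing, which surfaces image problems early:
[root@master-01 ~]# kubeadm config images pull --config kubeadm-config.yaml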
Initialize the cluster on master-01
[root@master-01 ~]# kubeadm init --config kubeadm-config.yaml
Initialization succeeded:
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
kubeadm join 192.168.1.100:6666 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:32b0de90c3b1515b0e5df360e96fa4328ea17facfd234f689ff1c1a4486e5e4c \
    --control-plane    //used to join additional master nodes
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.1.100:6666 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:32b0de90c3b1515b0e5df360e96fa4328ea17facfd234f689ff1c1a4486e5e4c    //used to join worker nodes
[root@master-01 ~]# mkdir -p $HOME/.kube
[root@master-01 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master-01 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
Install the network plugin
calico_v3.10.yaml
Link: https://pan.baidu.com/s/1eYxsDF-9aPpjnt3iKfy7BQ
Extraction code: thq0
[root@master-01 ~]# sed -i 's/192.168.0.0/10.244.0.0/g' calico_v3.10.yaml
[root@master-01 ~]# kubectl apply -f calico_v3.10.yaml    //after this succeeds, it takes a while for the node to come up
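The calico and coredns pods take a few minutes to start; they can be watched with:
[root@master-01 ~]# kubectl get pods -n kube-system -w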
Check node status with kubectl:
[root@master-01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master-01 Ready master 6m37s v1.18.2
Join the second master node
Create the certificate directory on master-02:
[root@master-02 ~]# mkdir -p /etc/kubernetes/pki/etcd
Send the certificate files from master-01 to master-02:
[root@master-01 ~]# scp /etc/kubernetes/admin.conf root@master-02:/etc/kubernetes
[root@master-01 ~]# scp /etc/kubernetes/pki/{ca.*,sa.*,front-proxy-ca.*} root@master-02:/etc/kubernetes/pki
[root@master-01 ~]# scp /etc/kubernetes/pki/etcd/ca.* root@master-02:/etc/kubernetes/pki/etcd
Finally, join the cluster:
[root@master-02 ~]# kubeadm join 192.168.1.100:6666 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:32b0de90c3b1515b0e5df360e96fa4328ea17facfd234f689ff1c1a4486e5e4c --control-plane
Join succeeded:
To start administering your cluster from this node, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@master-02 ~]# mkdir -p $HOME/.kube
[root@master-02 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master-02 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
Join the worker node
[root@node ~]# kubeadm join 192.168.1.100:6666 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:32b0de90c3b1515b0e5df360e96fa4328ea17facfd234f689ff1c1a4486e5e4c
Join succeeded:
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
[root@master-01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master-01 Ready master 4d1h v1.18.2
master-02 Ready master 2m14s v1.18.2
node Ready <none> 4d1h v1.18.2
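To verify the HA setup end to end, one simple test (assuming the VIP currently sits on master-01) is to stop keepalived there, confirm the VIP moves to master-02, and check that kubectl still works through the VIP:
[root@master-01 ~]# systemctl stop keepalived
[root@master-02 ~]# ip addr show eth0 | grep 192.168.1.100
[root@master-02 ~]# kubectl get nodes
[root@master-01 ~]# systemctl start keepalived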