kubeadm安装k8s(centos7)
—环境准备
1 准备了三台虚拟机,配置是2核4G,40G硬盘,分配IP主机如下(关闭防火墙和selinux)
ip | 主机名 | 作用 |
---|---|---|
10.0.0.10 | master | k8s主节点 |
10.0.0.15 | node1 | k8s从节点1 |
10.0.0.16 | node2 | k8s从节点2 |
1.1 所有节点操作:
# 关闭防火墙
systemctl stop firewalld.service
# 禁用selinux
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
2 域名解析
2.1 所有节点操作:
vim /etc/hosts
###
10.0.0.10 master
10.0.0.15 node1
10.0.0.16 node2
###
3 时间同步
3.1 master节点操作:
# master作为时间服务器master节点操作:
yum -y install chrony
vim /etc/chrony.conf
###
# 注释server开头的行
# server 0.centos.pool.ntp.org iburst
# server 1.centos.pool.ntp.org iburst
# server 2.centos.pool.ntp.org iburst
# server 3.centos.pool.ntp.org iburst
# 添加如下1行
server 10.0.0.10 iburst
# 去掉下面两行注释并修改如下
allow 10.0.0.0/24
local stratum 10
###
systemctl enable chronyd.service --now
3.2 node从节点操作:
# node从节点作为客户端
yum -y install chrony
vim /etc/chrony.conf
###
# 注释server开头的行
# server 0.centos.pool.ntp.org iburst
# server 1.centos.pool.ntp.org iburst
# server 2.centos.pool.ntp.org iburst
# server 3.centos.pool.ntp.org iburst
# 添加1行
server 10.0.0.10 iburst
###
systemctl enable chronyd.service --now
# 检查时间是否同步
chronyc sources -v
4 关闭swap分区
所有节点操作:
swapoff -a
sed -i '/swap/s/^/#/g' /etc/fstab
5 配置内核参数
所有节点操作:
# 将桥接的IPv4流量传递到iptables的链
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system
—安装docker(20.10.1)
1 配置docker-ce yum源
所有节点操作:
# step 1: 安装必要的一些系统工具
yum install -y yum-utils device-mapper-persistent-data lvm2
# Step 2: 添加软件源信息
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Step 3
sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
2 安装指定版本docker-ce
2.1 本地安装k8s1.21版本
查看官方https://github.com/kubernetes/kubernetes的CHANGELOG
https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.21.md
Update the latest validated version of Docker to 20.10 (#98977, @neolit123) [SIG CLI, Cluster Lifecycle and Node]
从如上文档得知,docker的版本必须在20.10+
2.2 所有节点操作:
rpm -qa | grep docker | xargs yum -y remove
yum list docker-ce.x86_64 --showduplicates | sort -r
# 安装指定版docker-ce
yum install -y docker-ce-20.10.1 docker-ce-cli-20.10.1
systemctl enable docker.service --now
docker --version
3 添加阿里仓库加速器
所有节点操作:
# 检测到“cgroupfs”作为Docker cgroup驱动程序。 推荐的驱动程序是“systemd”
# "exec-opts":["native.cgroupdriver=systemd"]
mkdir -p /etc/docker
tee /etc/docker/daemon.json << EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors": ["https://b85n5qwm.mirror.aliyuncs.com"]
}
EOF
systemctl daemon-reload && systemctl restart docker
—安装kubelet、kubeadm、kubectl(1.21.2)
1 配置kubernetes yum源
所有节点操作:
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
2 安装kubelet、kubeadm、kubectl指定版本
所有节点操作:
# 安装1.21.2
yum list kubelet --showduplicates | sort -r
yum -y install kubelet-1.21.2 kubeadm-1.21.2 kubectl-1.21.2
# 先不要启动kubelet(kubeadm init/join时会自动拉起),此处仅设置开机自启
systemctl enable kubelet
3 使用kubeadm引导集群
3.1 master节点操作:
# 初始化主节点
kubeadm init --kubernetes-version=1.21.2 \
--apiserver-advertise-address=10.0.0.10 \
--control-plane-endpoint=master \
--image-repository registry.aliyuncs.com/google_containers \
--service-cidr=10.244.0.0/16 \
--pod-network-cidr=192.168.0.0/16
# 所有网络范围不重叠(172.17.0.0/16网段不可用,因为docker0占用)
3.2 备注:初始化主节点前可以先下载好所需的镜像(可选操作)
master节点操作:
# Pre-pull every image kubeadm init needs, from an Aliyun mirror registry.
# NOTE: the heredoc delimiter MUST be quoted ('EOF') — otherwise ${images[@]}
# and $imageName are expanded by the *current* shell (where they are empty)
# while writing the file, producing a broken images.sh.
tee ./images.sh << 'EOF'
#!/bin/bash
images=(
kube-apiserver:v1.21.2
kube-proxy:v1.21.2
kube-controller-manager:v1.21.2
kube-scheduler:v1.21.2
coredns:1.7.0
etcd:3.4.13-0
pause:3.2
)
for imageName in "${images[@]}" ; do
docker pull registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/$imageName
done
EOF
chmod +x ./images.sh && ./images.sh
# node节点也可以下载好kube-proxy:v1.21.2,pause:3.2两个都需要的镜像
3.3 master节点初始化成功后显示
3.4 注意留下kubeadm join的信息,加入节点时要用到
# 加入master节点命令
kubeadm join master:6443 --token rrirpp.3yow3l6prg1bsms6 \
--discovery-token-ca-cert-hash sha256:<HASH> --control-plane
# 加入node节点命令
kubeadm join master:6443 --token rrirpp.3yow3l6prg1bsms6 \
--discovery-token-ca-cert-hash sha256:<HASH>
# 令牌过期,生成新令牌
kubeadm token create --print-join-command
3.5 创建kubectl
master节点操作:
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
3.6 加入node节点
node节点操作:
kubeadm join master:6443 --token rrirpp.3yow3l6prg1bsms6 \
--discovery-token-ca-cert-hash sha256:68f0722378fe790ed166aa5c6108bf4d592d10144352287eaee3dad85ee6da3f
加入成功后显示
4 安装网络插件
master节点操作:
curl https://projectcalico.docs.tigera.io/manifests/canal.yaml -O
vim canal.yaml
###
# 搜索"CALICO_IPV4POOL_CIDR",打开以下注释,修改值为"192.168.0.0/16"
- name: CALICO_IPV4POOL_CIDR
value: "192.168.0.0/16"
###
kubectl apply -f canal.yaml
# 验证,状态为Ready表示成功,需要等待会
kubectl get nodes
5 其它插件部署,配置(可选,以下全部master节点操作)
1 命令补全
yum -y install bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
2 dashboard
2.1 安装dashboard
kubernetes-dashboard是k8s的UI看板,可以查看、编辑整个集群状态
官方地址:https://github.com/kubernetes/dashboard/
查看release,这里要安装和上面的k8s对应的dashboard
https://github.com/kubernetes/dashboard/releases
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.4.0/aio/deploy/recommended.yaml
vim recommended.yaml
###
# 搜索标签为"kubernetes-dashboard"的"Service",添加2行,如下图
type: NodePort
nodePort: 32500
###
# 获取访问dashboard可视化界面端口
kubectl -n kubernetes-dashboard get svc
访问地址:https://集群任意IP:端口 https://10.0.0.10:32500
2.2 配置dashboard账户,获取令牌,生成kubeconfig文件
# 获取token令牌,使用获取的token可以登陆(需先执行下面两步创建账户并绑定角色)
kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
# 创建service account
kubectl create serviceaccount dashboard-admin -n kube-system
# 绑定默认cluster-admin管理员集群角色
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
# 获取token令牌
kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
# 生成kubeconfig文件(/root/dashboard-admin.conf),使用dashboard-admin.conf可以登陆
# 注意:secret名称的随机后缀(如-xfz46)每个集群不同,先用"kubectl -n kube-system get secrets"查询实际名称
DASH_TOKEN=$(kubectl -n kube-system get secrets dashboard-admin-token-xfz46 -o jsonpath={.data.token}|base64 -d)
kubectl config set-cluster kubernetes --server=https://10.0.0.10:6443 --kubeconfig=/root/dashboard-admin.conf
kubectl config set-credentials dashboard-admin --token=$DASH_TOKEN --kubeconfig=/root/dashboard-admin.conf
kubectl config set-context dashboard-admin@kubernetes --cluster=kubernetes --user=dashboard-admin --kubeconfig=/root/dashboard-admin.conf
kubectl config use-context dashboard-admin@kubernetes --kubeconfig=/root/dashboard-admin.conf
3 metric-server
wget https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.5.2/components.yaml
vim components.yaml
###
#添加1行,如下图
- --kubelet-insecure-tls
###
kubectl apply -f components.yaml
# 验证
kubectl top nodes
4 nginx-ingress-controller
wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.1.1/deploy/static/provider/cloud/deploy.yaml
vim deploy.yaml
# 修改内容如下图
# 由于nginx-ingress-controller的service端口修改成了80和443,所以需要修改nodePort的默认端口范围(30000-32767)
vim /etc/kubernetes/manifests/kube-apiserver.yaml
###
# 添加内容,如下图
- --service-node-port-range=1-65535
###
kubectl -n kube-system delete pod kube-apiserver-master #静态Pod名为kube-apiserver-<节点名>,kubectl不支持***通配,删除后会自动重建
kubectl -n kube-system get pod
# 查看kube-apiserver*** pod运行正常后执行
kubectl apply -f deploy.yaml
5 helm
5.1 helm安装
wget https://get.helm.sh/helm-v3.8.1-linux-amd64.tar.gz
tar -xf helm-v3.8.1-linux-amd64.tar.gz
mv linux-amd64/helm /usr/local/bin/helm
# 验证,显示帮助命令表示成功
helm help
# 添加稳定仓库
helm repo add bitnami https://charts.bitnami.com/bitnami
# 列出bitnami仓库可以安装的包
helm search repo bitnami
5.2 helm命令自动补全
# 临时设定
source <(helm completion bash)
# 写配置文件
echo "source <(helm completion bash)" >> ~/.bash_profile
# bash-completion添加配置
helm completion bash > /usr/share/bash-completion/completions/helm