Installing a Kubernetes (k8s) cluster with kubeadm

1. Environment preparation

  • OS: CentOS 7.9

1.1 Node IP assignment

Role            IP           Hostname     Installed components
Control node    10.0.0.81    k8s-master   apiserver, controller-manager, scheduler, etcd, kube-proxy, docker, calico
Worker node     10.0.0.82    k8s-node1    kubelet, kube-proxy, docker, calico, coredns
Worker node     10.0.0.83    k8s-node2    kubelet, kube-proxy, docker, calico, coredns

1.2 Configure the hosts file on each host

  • Edit /etc/hosts on every machine and add the following three lines (a scripted variant follows the listing):
10.0.0.81 k8s-master 
10.0.0.82 k8s-node1 
10.0.0.83 k8s-node2
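
Instead of editing the file interactively, a minimal sketch that appends the same three entries (run it once on each of the three machines):

cat >> /etc/hosts <<'EOF'
10.0.0.81 k8s-master
10.0.0.82 k8s-node1
10.0.0.83 k8s-node2
EOF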

1.3 Configure passwordless SSH between the hosts

  • Run on all three hosts:

[root@k8s-master root]# ssh-keygen   # press Enter through all prompts

[root@k8s-node1 root]# ssh-keygen
[root@k8s-node2 root]# ssh-keygen

  • Copy the generated public key to each remote host:

k8s-master

[root@k8s-master root]# ssh-copy-id k8s-master 

[root@k8s-node1 root]# ssh-copy-id k8s-master 

[root@k8s-node2 root]# ssh-copy-id k8s-master 

k8s-node1

[root@k8s-master root]# ssh-copy-id k8s-node1 

[root@k8s-node1 root]# ssh-copy-id k8s-node1 

[root@k8s-node2 root]# ssh-copy-id k8s-node1 

k8s-node2

[root@k8s-master root]# ssh-copy-id k8s-node2

[root@k8s-node1 root]# ssh-copy-id k8s-node2

[root@k8s-node2 root]# ssh-copy-id k8s-node2
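
A quick check that the trust works in every direction; run from any of the three nodes, and each command should print the remote hostname without asking for a password:

for h in k8s-master k8s-node1 k8s-node2; do
  ssh -o BatchMode=yes $h hostname
done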

1.4 Disable swap

Disable temporarily

[root@k8s-master root]# swapoff -a

[root@k8s-node1 root]# swapoff -a

[root@k8s-node2 root]# swapoff -a

Disable permanently

[root@k8s-master root]# vim /etc/fstab 
#/dev/mapper/centos-swap swap                    swap    defaults        0 0    # comment out the swap line

[root@k8s-node1 root]# vim /etc/fstab 
#/dev/mapper/centos-swap swap                    swap    defaults        0 0    # comment out the swap line

[root@k8s-node2 root]# vim /etc/fstab 
#/dev/mapper/centos-swap swap                    swap    defaults        0 0    # comment out the swap line
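
The fstab edit can also be scripted; a minimal sketch, assuming the standard CentOS swap entry shown above (back the file up first):

cp /etc/fstab /etc/fstab.bak
sed -ri 's/^([^#].*[[:space:]]swap[[:space:]].*)$/#\1/' /etc/fstab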

1.5 Adjust kernel parameters

k8s-master

[root@k8s-master root]# modprobe br_netfilter
[root@k8s-master root]#  echo "modprobe br_netfilter" >> /etc/profile
[root@k8s-master root]# cat > /etc/sysctl.d/k8s.conf <<EOF
> net.bridge.bridge-nf-call-ip6tables = 1
> net.bridge.bridge-nf-call-iptables = 1
> net.ipv4.ip_forward = 1
> EOF
[root@k8s-master root]# sysctl -p /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
[root@k8s-master root]# 

k8s-node1

[root@k8s-node1 root]# modprobe br_netfilter
[root@k8s-node1 root]#  echo "modprobe br_netfilter" >> /etc/profile
[root@k8s-node1 root]# cat > /etc/sysctl.d/k8s.conf <<EOF
> net.bridge.bridge-nf-call-ip6tables = 1
> net.bridge.bridge-nf-call-iptables = 1
> net.ipv4.ip_forward = 1
> EOF
[root@k8s-node1 root]# sysctl -p /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
[root@k8s-node1 root]# 

k8s-node2

[root@k8s-node2 root]# modprobe br_netfilter
[root@k8s-node2 root]#  echo "modprobe br_netfilter" >> /etc/profile
[root@k8s-node2 root]# cat > /etc/sysctl.d/k8s.conf <<EOF
> net.bridge.bridge-nf-call-ip6tables = 1
> net.bridge.bridge-nf-call-iptables = 1
> net.ipv4.ip_forward = 1
> EOF
[root@k8s-node2 root]# sysctl -p /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
[root@k8s-node2 root]# 
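
Note that the modprobe line added to /etc/profile only runs on interactive logins. As an alternative sketch, the module can be loaded at every boot through systemd, and the result verified (run on all three nodes):

cat > /etc/modules-load.d/br_netfilter.conf <<EOF
br_netfilter
EOF
lsmod | grep br_netfilter                    # the module should be listed
sysctl net.bridge.bridge-nf-call-iptables    # should print 1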

1.6 Disable the firewalld firewall and SELinux

  • Stop and disable the firewall:
[root@k8s-master root]#  systemctl stop firewalld ; systemctl disable firewalld
[root@k8s-node1 root]#  systemctl stop firewalld ; systemctl disable firewalld
[root@k8s-node2 root]#  systemctl stop firewalld ; systemctl disable firewalld
  • Disable SELinux:
[root@k8s-master root]#  sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
[root@k8s-node1 root]#  sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
[root@k8s-node2 root]#  sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
  • Reboot the machines, then verify:
[root@k8s-master root]# reboot
[root@k8s-master root]# getenforce 
Disabled

[root@k8s-node1 root]# reboot
[root@k8s-node1 root]# getenforce 
Disabled

[root@k8s-node2 root]# reboot
[root@k8s-node2 root]# getenforce 
Disabled

# Note: Disabled means SELinux is turned off
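
If you would rather not reboot immediately, a minimal sketch that switches SELinux to permissive mode right away (the config edit above still makes the change permanent):

setenforce 0
getenforce    # reports Permissive now; it becomes Disabled after the next reboot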

1.7 Configure Aliyun repos

  • Install yum-utils:
[root@k8s-master root]# yum install yum-utils -y


[root@k8s-node1 root]# yum install yum-utils -y

[root@k8s-node2 root]# yum install yum-utils -y

  • Configure the Aliyun docker-ce repo:
yum-config-manager \
    --add-repo \
    https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    
sed -i 's/download.docker.com/mirrors.aliyun.com\/docker-ce/g' /etc/yum.repos.d/docker-ce.repo

yum makecache fast

# Run the above on all three hosts
  • Configure the Aliyun repo needed to install the Kubernetes components:
[root@k8s-master root]# vim  /etc/yum.repos.d/kubernetes.repo
[root@k8s-master root]# cat /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0

# Copy it to the other two nodes
[root@k8s-master root]# scp /etc/yum.repos.d/kubernetes.repo k8s-node1:/etc/yum.repos.d/
[root@k8s-master root]# scp /etc/yum.repos.d/kubernetes.repo k8s-node2:/etc/yum.repos.d/
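
A quick check that both new repos are visible on each node (repo IDs as defined in the files above):

yum repolist | grep -Ei 'docker-ce|kubernetes'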

1.8 Configure time synchronization

  • Install ntpdate:
[root@k8s-master root]# yum install ntpdate -y
[root@k8s-node1 root]# yum install ntpdate -y
[root@k8s-node2 root]# yum install ntpdate -y
  • Sync with a public NTP server:
[root@k8s-master root]# ntpdate cn.pool.ntp.org

[root@k8s-node1 root]# ntpdate cn.pool.ntp.org

[root@k8s-node2 root]# ntpdate cn.pool.ntp.org

  • Add a cron job so the sync repeats:
[root@k8s-master root]# crontab -e 
*/2 * * * * ntpdate cn.pool.ntp.org &>/dev/null
[root@k8s-node1 root]# crontab -e 
*/2 * * * * ntpdate cn.pool.ntp.org &>/dev/null
[root@k8s-node2 root]# crontab -e 
*/2 * * * * ntpdate cn.pool.ntp.org &>/dev/null
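
The same cron entry can be installed without opening an editor; a minimal sketch that preserves any existing crontab lines:

(crontab -l 2>/dev/null; echo '*/2 * * * * ntpdate cn.pool.ntp.org &>/dev/null') | crontab -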

1.9 Enable IPVS

[root@k8s-master root]# vim /etc/sysconfig/modules/ipvs.modules 
#!/bin/bash
# Load every IPVS-related kernel module that is available in the running kernel.
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in ${ipvs_modules}; do
  # only try to load modules that actually exist for this kernel
  /sbin/modinfo -F filename ${kernel_module} > /dev/null 2>&1
  if [ $? -eq 0 ]; then
    /sbin/modprobe ${kernel_module}
  fi
done

[root@k8s-master root]# scp /etc/sysconfig/modules/ipvs.modules k8s-node1:/etc/sysconfig/modules/
[root@k8s-master root]# scp /etc/sysconfig/modules/ipvs.modules k8s-node2:/etc/sysconfig/modules/

  • Verify:
[root@k8s-master root]# chmod 755  /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
ip_vs_ftp              13079  0 
nf_nat                 26583  1 ip_vs_ftp
ip_vs_sed              12519  0 
ip_vs_nq               12516  0 
ip_vs_sh               12688  0 
ip_vs_dh               12688  0 
ip_vs_lblcr            12922  0 
ip_vs_lblc             12819  0 
ip_vs_wrr              12697  0 
ip_vs_rr               12600  0 
ip_vs_wlc              12519  0 
ip_vs_lc               12516  0 
ip_vs                 145458  22 ip_vs_dh,ip_vs_lc,ip_vs_nq,ip_vs_rr,ip_vs_sh,ip_vs_ftp,ip_vs_sed,ip_vs_wlc,ip_vs_wrr,ip_vs_lblcr,ip_vs_lblc
nf_conntrack          139264  2 ip_vs,nf_nat
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack
[root@k8s-master root]# 


[root@k8s-node1 root]# chmod 755  /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
ip_vs_ftp              13079  0 
nf_nat                 26583  1 ip_vs_ftp
ip_vs_sed              12519  0 
ip_vs_nq               12516  0 
ip_vs_sh               12688  0 
ip_vs_dh               12688  0 
ip_vs_lblcr            12922  0 
ip_vs_lblc             12819  0 
ip_vs_wrr              12697  0 
ip_vs_rr               12600  0 
ip_vs_wlc              12519  0 
ip_vs_lc               12516  0 
ip_vs                 145458  22 ip_vs_dh,ip_vs_lc,ip_vs_nq,ip_vs_rr,ip_vs_sh,ip_vs_ftp,ip_vs_sed,ip_vs_wlc,ip_vs_wrr,ip_vs_lblcr,ip_vs_lblc
nf_conntrack          139264  2 ip_vs,nf_nat
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack
[root@k8s-node1 root]# 

[root@k8s-node2 root]# chmod 755  /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
ip_vs_ftp              13079  0 
nf_nat                 26583  1 ip_vs_ftp
ip_vs_sed              12519  0 
ip_vs_nq               12516  0 
ip_vs_sh               12688  0 
ip_vs_dh               12688  0 
ip_vs_lblcr            12922  0 
ip_vs_lblc             12819  0 
ip_vs_wrr              12697  0 
ip_vs_rr               12600  0 
ip_vs_wlc              12519  0 
ip_vs_lc               12516  0 
ip_vs                 145458  22 ip_vs_dh,ip_vs_lc,ip_vs_nq,ip_vs_rr,ip_vs_sh,ip_vs_ftp,ip_vs_sed,ip_vs_wlc,ip_vs_wrr,ip_vs_lblcr,ip_vs_lblc
nf_conntrack          139264  2 ip_vs,nf_nat
libcrc32c              12644  4 xfs,ip_vs,nf_nat,nf_conntrack
[root@k8s-node2 root]# 

1.10 Install base packages

[root@k8s-master root]# yum install -y \
    device-mapper-persistent-data lvm2 wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake \
    libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel vim ncurses-devel \
    autoconf automake zlib-devel python-devel epel-release openssh-server socat ipvsadm conntrack \
    ntpdate telnet tree bash-completion bash-completion-extras sysstat iotop iftop htop nc nmap \
    telnet bc psmisc httpd-tools bind-utils nethogs expect
    
[root@k8s-node1 root]# yum install -y \
   device-mapper-persistent-data lvm2 wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake \
   libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel vim ncurses-devel \
   autoconf automake zlib-devel python-devel epel-release openssh-server socat ipvsadm conntrack \
   ntpdate telnet tree bash-completion bash-completion-extras sysstat iotop iftop htop nc nmap \
   telnet bc psmisc httpd-tools bind-utils nethogs expect
   
[root@k8s-node2 root]# yum install -y \
   device-mapper-persistent-data lvm2 wget net-tools nfs-utils lrzsz gcc gcc-c++ make cmake \
   libxml2-devel openssl-devel curl curl-devel unzip sudo ntp libaio-devel vim ncurses-devel \
   autoconf automake zlib-devel python-devel epel-release openssh-server socat ipvsadm conntrack \
   ntpdate telnet tree bash-completion bash-completion-extras sysstat iotop iftop htop nc nmap \
   telnet bc psmisc httpd-tools bind-utils nethogs expect

1.11 Install iptables

[root@k8s-master root]# yum install iptables-services -y
[root@k8s-node1 root]# yum install iptables-services -y
[root@k8s-node2 root]# yum install iptables-services -y
  • Disable iptables and flush the firewall rules:
[root@k8s-master root]# service iptables stop   && systemctl disable iptables
[root@k8s-master root]# iptables -F

[root@k8s-node1 root]# service iptables stop   && systemctl disable iptables
[root@k8s-node1 root]# iptables -F

[root@k8s-node2 root]# service iptables stop   && systemctl disable iptables
[root@k8s-node2 root]# iptables -F

2. Install the Docker service

2.1 Install docker-ce

[root@k8s-master root]# yum install docker-ce -y
[root@k8s-master root]# systemctl start docker && systemctl enable docker.service

[root@k8s-node1 root]# yum install docker-ce -y
[root@k8s-node1 root]# systemctl start docker && systemctl enable docker.service

[root@k8s-node2 root]# yum install docker-ce -y
[root@k8s-node2 root]# systemctl start docker && systemctl enable docker.service

2.2 Configure Docker registry mirrors (image accelerator)

[root@k8s-master root]# sudo mkdir -p /etc/docker
[root@k8s-master root]# sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors":["https://rsbud4vc.mirror.aliyuncs.com","https://registry.docker-cn.com","https://docker.mirrors.ustc.edu.cn","https://dockerhub.azk8s.cn","http://hub-mirror.c.163.com","https://pwcj7v7a.mirror.aliyuncs.com", "https://rncxm540.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
[root@k8s-master root]# sudo systemctl daemon-reload
[root@k8s-master root]# sudo systemctl restart docker

[root@k8s-node1 root]# sudo mkdir -p /etc/docker
[root@k8s-node1  root]# sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors":["https://rsbud4vc.mirror.aliyuncs.com","https://registry.docker-cn.com","https://docker.mirrors.ustc.edu.cn","https://dockerhub.azk8s.cn","http://hub-mirror.c.163.com","https://pwcj7v7a.mirror.aliyuncs.com", "https://rncxm540.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
[root@k8s-node1  root]# sudo systemctl daemon-reload
[root@k8s-node1  root]# sudo systemctl restart docker

[root@k8s-node2 root]# sudo mkdir -p /etc/docker
[root@k8s-node2  root]# sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors":["https://rsbud4vc.mirror.aliyuncs.com","https://registry.docker-cn.com","https://docker.mirrors.ustc.edu.cn","https://dockerhub.azk8s.cn","http://hub-mirror.c.163.com","https://pwcj7v7a.mirror.aliyuncs.com", "https://rncxm540.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
[root@k8s-node2  root]# sudo systemctl daemon-reload
[root@k8s-node2  root]# sudo systemctl restart docker

# Note: this sets the Docker cgroup driver to systemd (the default is cgroupfs). kubelet uses systemd by default, and the two must match.
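
A quick check that Docker picked up the systemd cgroup driver (run on each node):

docker info 2>/dev/null | grep -i 'cgroup driver'
# expected output: Cgroup Driver: systemd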

3. Install k8s

3.1 Install the packages used to initialize k8s

[root@k8s-master root]# yum install -y kubelet-1.23.1 kubeadm-1.23.1 kubectl-1.23.1
[root@k8s-master root]# systemctl enable kubelet

[root@k8s-node1 root]# yum install -y kubelet-1.23.1 kubeadm-1.23.1 kubectl-1.23.1
[root@k8s-node1 root]# systemctl enable kubelet

[root@k8s-node2 root]# yum install -y kubelet-1.23.1 kubeadm-1.23.1 kubectl-1.23.1
[root@k8s-node2 root]# systemctl enable kubelet
  • Note: what each package does
    • kubeadm: a tool used to initialize (bootstrap) the k8s cluster
    • kubelet: installed on every node in the cluster; responsible for starting Pods
    • kubectl: used to deploy and manage applications, inspect resources, and create, delete, and update components
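
A quick sanity check that every node ended up with the expected 1.23.1 binaries:

kubeadm version -o short            # expected: v1.23.1
kubectl version --client --short    # expected: Client Version: v1.23.1
kubelet --version                   # expected: Kubernetes v1.23.1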

4. Initialize the k8s cluster with kubeadm

4.1 Offline image bundle needed to initialize the cluster

  • Download the image bundle from Baidu Netdisk:

Link: https://pan.baidu.com/s/1P3BJE8bV2JbmrUJKhHp55Q?pwd=nb77
Extraction code: nb77

  • Upload the bundle to k8s-master, k8s-node1, and k8s-node2, then load the images:
[root@k8s-master root]# docker load -i k8s-images-v1.23.1.tar.gz
[root@k8s-node1 root]# docker load -i k8s-images-v1.23.1.tar.gz
[root@k8s-node2 root]# docker load -i k8s-images-v1.23.1.tar.gz

[root@k8s-master root]# docker images
# Check the output: there should be 11 images in total

4.2 Initialize the k8s cluster with kubeadm

kubeadm init --kubernetes-version=1.23.1 \
  --apiserver-advertise-address=10.0.0.81 \
  --image-repository registry.aliyuncs.com/google_containers \
  --pod-network-cidr=10.244.0.0/16 \
  --ignore-preflight-errors=SystemVerification
  • On success, the output ends with a join command similar to the following:
kubeadm join 10.0.0.81:6443 --token j9iymi.3c7vz47mf8pnzfrn \
	--discovery-token-ca-cert-hash sha256:d8147e6039221d4e949d78f4f95fed8da6cc6c3f37f9fa1935e92cabaeb238aa 

# Save this join command. Do not copy the one shown above; the token and hash are different for every installation. Run the command generated by your own init on the node machines to join them to the cluster.

4.3 Configure the kubectl config file

Authorize kubectl so that the kubectl command can use the admin certificate to manage the k8s cluster.

[root@k8s-master root]# mkdir -p $HOME/.kube
[root@k8s-master root]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master root]#  chown $(id -u):$(id -g) $HOME/.kube/config
[root@k8s-master root]# kubectl get nodes
NAME         STATUS     ROLES                  AGE    VERSION
k8s-master   NotReady   control-plane,master   8m3s   v1.23.1
# Note: the node is NotReady at this point because the network plugin has not been installed yet
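
Optionally, a small convenience sketch that enables kubectl tab completion on the master (bash-completion was installed in section 1.10):

echo 'source <(kubectl completion bash)' >> ~/.bashrc
source ~/.bashrc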

5. Join the worker nodes to the cluster

  • Generate the join command:
[root@k8s-master root]# kubeadm token create --print-join-command
kubeadm join 10.0.0.81:6443 --token eiic3z.nj0s49x08ae31xek --discovery-token-ca-cert-hash sha256:d8147e6039221d4e949d78f4f95fed8da6cc6c3f37f9fa1935e92cabaeb238aa    

[root@k8s-master root]# kubeadm token create --print-join-command
kubeadm join 10.0.0.81:6443 --token 55746g.lia51lnwhrygtf2j --discovery-token-ca-cert-hash sha256:d8147e6039221d4e949d78f4f95fed8da6cc6c3f37f9fa1935e92cabaeb238aa 
# Two join commands are generated here, to be run on node1 and node2 respectively
  • Run the join command on each worker node:
[root@k8s-node1 root]# kubeadm join 10.0.0.81:6443 --token eiic3z.nj0s49x08ae31xek --discovery-token-ca-cert-hash sha256:d8147e6039221d4e949d78f4f95fed8da6cc6c3f37f9fa1935e92cabaeb238aa 

[root@k8s-node2 root]# kubeadm join 10.0.0.81:6443 --token 55746g.lia51lnwhrygtf2j --discovery-token-ca-cert-hash sha256:d8147e6039221d4e949d78f4f95fed8da6cc6c3f37f9fa1935e92cabaeb238aa 


  • Check the node status on the master:
[root@k8s-master root]# kubectl get nodes
NAME         STATUS     ROLES                  AGE     VERSION
k8s-master   NotReady   control-plane,master   18m     v1.23.1
k8s-node1    NotReady   <none>                 5m41s   v1.23.1
k8s-node2    NotReady   <none>                 4m11s   v1.23.1

  • Label the worker nodes with the worker role:
[root@k8s-master root]# kubectl label node k8s-node1 node-role.kubernetes.io/worker=worker
node/k8s-node1 labeled
[root@k8s-master root]# kubectl label node k8s-node2 node-role.kubernetes.io/worker=worker
node/k8s-node2 labeled
[root@k8s-master root]# kubectl get nodes
NAME         STATUS     ROLES                  AGE     VERSION
k8s-master   NotReady   control-plane,master   20m     v1.23.1
k8s-node1    NotReady   worker                 8m19s   v1.23.1
k8s-node2    NotReady   worker                 6m49s   v1.23.1

6. Install the Kubernetes network component: Calico

  • Download the manifest:
Link: https://pan.baidu.com/s/1P3BJE8bV2JbmrUJKhHp55Q?pwd=nb77
Extraction code: nb77
calico.yaml
  • Install the Calico network plugin from the YAML manifest:
[root@k8s-master root]# kubectl apply -f  calico.yaml
  • Check the cluster status:
[root@k8s-master root]# kubectl get nodes
NAME         STATUS   ROLES                  AGE   VERSION
k8s-master   Ready    control-plane,master   38m   v1.23.1
k8s-node1    Ready    worker                 25m   v1.23.1
k8s-node2    Ready    worker                 23m   v1.23.1
# STATUS is Ready, which means the k8s cluster is running normally
[root@k8s-master root]# kubectl get pods -n kube-system
NAME                                       READY   STATUS    RESTARTS   AGE
calico-kube-controllers-677cd97c8d-x64wl   1/1     Running   0          11m
calico-node-glk79                          1/1     Running   0          11m
calico-node-jj89w                          1/1     Running   0          11m
calico-node-mz27x                          1/1     Running   0          11m
coredns-6d8c4cb4d-bvxpj                    1/1     Running   0          38m
coredns-6d8c4cb4d-pzhnc                    1/1     Running   0          38m
etcd-k8s-master                            1/1     Running   0          38m
kube-apiserver-k8s-master                  1/1     Running   0          38m
kube-controller-manager-k8s-master         1/1     Running   0          38m
kube-proxy-nd2sw                           1/1     Running   0          26m
kube-proxy-v69sk                           1/1     Running   0          38m
kube-proxy-zzjcv                           1/1     Running   0          24m
kube-scheduler-k8s-master                  1/1     Running   0          38m
# STATUS Running means the pod is up and running
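
As a final smoke test, a minimal sketch (the pod name and the busybox image tag are arbitrary choices) that verifies pod scheduling and cluster DNS:

kubectl run dns-test --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default
sleep 10 && kubectl logs dns-test    # should resolve kubernetes.default to the cluster IP of the kubernetes service
kubectl delete pod dns-test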