Kubernetes (v1.25.0) cluster deployment with kubeadm (Docker-based)

Machine preparation

IP               version     hostname      cpu  memory
192.168.142.130  CentOS 7.9  k8s-master01  2C   4G
192.168.142.131  CentOS 7.9  k8s-node01    2C   4G
192.168.142.132  CentOS 7.9  k8s-node02    2C   4G
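It also helps to set each machine's hostname and add mutual host entries up front; the scp commands later in this guide assume the nodes can resolve each other by name. A minimal sketch (run the matching hostnamectl line on each machine):

hostnamectl set-hostname k8s-master01   # k8s-node01 / k8s-node02 on the other two machines
# Append name resolution entries on all three machines
cat >> /etc/hosts <<EOF
192.168.142.130 k8s-master01
192.168.142.131 k8s-node01
192.168.142.132 k8s-node02
EOF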

Install dependency packages

yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git

Switch the firewall to iptables and flush the rules

systemctl stop firewalld && systemctl disable firewalld
yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save

Disable swap and SELinux

swapoff -a && sed -ri 's/.*swap.*/#&/' /etc/fstab
setenforce 0 && sed -i 's/enforcing/disabled/' /etc/selinux/config

Tune kernel parameters for Kubernetes

cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0 # Do not use swap unless the system hits OOM
vm.overcommit_memory=1 # Do not check whether enough physical memory is available
vm.panic_on_oom=0 # Do not panic on OOM; let the OOM killer handle it
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
sysctl -p /etc/sysctl.d/kubernetes.conf
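Two caveats here. First, the net.bridge.* keys only exist once the br_netfilter module is loaded (done in the IPVS step further below), so if sysctl -p complains "No such file or directory" for them, load the module and re-apply:

modprobe br_netfilter
sysctl -p /etc/sysctl.d/kubernetes.conf

Second, net.ipv4.tcp_tw_recycle was removed in kernel 4.12, so after the kernel upgrade below that line will fail to apply and can simply be deleted.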

Set the system time zone

# Set the system time zone to Asia/Shanghai
timedatectl set-timezone Asia/Shanghai
# Write the current UTC time to the hardware clock
timedatectl set-local-rtc 0
# Restart services that depend on the system time
systemctl restart rsyslog
systemctl restart crond
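The dependency step earlier installs ntp and ntpdate but never uses them; kubeadm clusters want consistent clocks across nodes. A minimal sync sketch, assuming any reachable NTP server (ntp.aliyun.com here is just an example):

# One-shot clock sync
ntpdate ntp.aliyun.com
# Re-sync hourly via cron
echo '0 * * * * /usr/sbin/ntpdate ntp.aliyun.com >/dev/null 2>&1' >> /var/spool/cron/root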

Stop services the system does not need

systemctl stop postfix && systemctl disable postfix

Configure rsyslogd and systemd-journald

mkdir /var/log/journal # Directory for persistent logs
mkdir /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# Persist logs to disk
Storage=persistent

# Compress historical logs
Compress=yes

SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000

# Cap total disk usage at 10G
SystemMaxUse=10G

# Cap a single journal file at 200M
SystemMaxFileSize=200M

# Retain logs for two weeks
MaxRetentionSec=2week

# Do not forward logs to syslog
ForwardToSyslog=no
EOF
systemctl restart systemd-journald
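To confirm the journal is now persisted, check that a machine-id subdirectory appeared under the new directory and how much space the journal is using:

ls /var/log/journal/
journalctl --disk-usage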

Upgrade the OS kernel

The stock 3.10.x kernel shipped with CentOS 7.x has bugs that make Docker and Kubernetes unstable.

# Add the ELRepo repository
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm

# Install the long-term-support kernel. After it is installed, check that the new kernel's menuentry in /boot/grub2/grub.cfg contains an initrd16 line; if it does not, install again!
# This may take a while
yum --enablerepo=elrepo-kernel install -y kernel-lt
 
# List all installed kernels
rpm -qa | grep kernel

kernel-headers-3.10.0-1160.76.1.el7.x86_64
kernel-tools-libs-3.10.0-1160.el7.x86_64
kernel-3.10.0-1160.el7.x86_64
kernel-tools-3.10.0-1160.el7.x86_64
kernel-lt-5.4.211-1.el7.elrepo.x86_64
# Set the new kernel as the default boot entry
grub2-set-default 'CentOS Linux (5.4.211-1.el7.elrepo.x86_64) 7 (Core)'
# Reboot for the change to take effect
reboot
# Check the running kernel
uname -r
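The title passed to grub2-set-default must match a menuentry in /boot/grub2/grub.cfg character for character. If unsure, list the exact titles first, or select by index instead (a sketch):

# List all menuentry titles known to GRUB
awk -F\' '/^menuentry /{print $2}' /boot/grub2/grub.cfg
# Alternatively, set the default by index (0 = the first title listed)
grub2-set-default 0
# Confirm which entry is saved as the default
grub2-editenv list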

Prerequisites for enabling IPVS in kube-proxy

modprobe br_netfilter

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack
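Loading these modules only satisfies the prerequisite; kube-proxy still defaults to iptables mode. One way to actually switch to IPVS once the cluster is up (a sketch to run on the master after the init step below; k8s-app=kube-proxy is the label kubeadm applies to the kube-proxy pods):

# Change mode: "" to mode: "ipvs" in the kube-proxy ConfigMap
kubectl -n kube-system edit configmap kube-proxy
# Recreate the kube-proxy pods so they pick up the new mode
kubectl -n kube-system delete pod -l k8s-app=kube-proxy
# The IPVS virtual server table should then be populated
ipvsadm -Ln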

Install Docker

# Install the yum-utils, device-mapper-persistent-data and lvm2 packages
yum install -y yum-utils device-mapper-persistent-data lvm2

# Add the Aliyun docker-ce repository mirror
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# List available Docker versions
yum list docker-ce --showduplicates | sort -r

# Install Docker (latest version)
yum update -y && yum install -y docker-ce

# Or install a specific version, e.g. docker-ce-18.03.1.ce-1.el7.centos
yum install docker-ce-18.03.1.ce-1.el7.centos

# Start Docker
systemctl start docker
# Stop Docker
systemctl stop docker
# Enable Docker at boot
systemctl enable docker
# Restart Docker
systemctl restart docker
# Inspect Docker
docker info

# Create the /etc/docker directory
mkdir /etc/docker

# Configure the daemon
cat > /etc/docker/daemon.json <<EOF
{
	"exec-opts":["native.cgroupdriver=systemd"],
	"log-driver":"json-file",
	"log-opts":{
		"max-size":"100m"
	}
}
EOF
mkdir -p /etc/systemd/system/docker.service.d

# Reload and restart the Docker service
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
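native.cgroupdriver=systemd matters because the kubelet on recent versions defaults to the systemd cgroup driver, and a mismatch between Docker and the kubelet causes node instability. Verify that Docker picked the setting up:

docker info | grep -i 'cgroup driver'
# Expected output: Cgroup Driver: systemd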

Configure cri-dockerd so Kubernetes can use Docker as its runtime

As of v1.24, dockershim has been removed from the Kubernetes project. For historical reasons, Docker does not implement CRI (the Container Runtime Interface), the standard Kubernetes promotes, so starting with Kubernetes v1.24 Docker can no longer be used directly as the container runtime.

If you still want to use Docker, you can insert an intermediate layer, cri-dockerd, between the kubelet and Docker. cri-dockerd is a shim that implements the CRI standard: one side talks to the kubelet over CRI, the other side talks to the Docker API, which indirectly lets Kubernetes use Docker as its container runtime. The obvious drawback of this architecture is a longer call chain and lower efficiency.

# Download the latest cri-dockerd release from: https://github.com/Mirantis/cri-dockerd/tags
# Extract cri-dockerd on the master first, then copy the binary to the other nodes
[root@k8s-master01 ~]# tar -zxf cri-dockerd-0.2.5.amd64.tgz
[root@k8s-master01 ~]# cp cri-dockerd/cri-dockerd /usr/bin/
[root@k8s-master01 ~/cri-dockerd]# scp /usr/bin/cri-dockerd root@k8s-node01:/usr/bin/
[root@k8s-master01 ~/cri-dockerd]# scp /usr/bin/cri-dockerd root@k8s-node02:/usr/bin/
# Create the cri-docker systemd unit files, then copy them to the other nodes
[root@k8s-master01 ~]# cat /usr/lib/systemd/system/cri-docker.service
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket

[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.8
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always

StartLimitBurst=3

StartLimitInterval=60s

LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity

TasksMax=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target

[root@k8s-master01 ~]# cat /usr/lib/systemd/system/cri-docker.socket
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service

[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker

[Install]
WantedBy=sockets.target

# Start cri-docker and enable it at boot
[root@k8s-master01 ~/cri-dockerd]# systemctl daemon-reload ; systemctl enable cri-docker --now
Created symlink from /etc/systemd/system/multi-user.target.wants/cri-docker.service to /usr/lib/systemd/system/cri-docker.service.
# Check its status
[root@k8s-master01 ~]# systemctl is-active cri-docker
active
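Optionally, once cri-tools is present (it comes in as a dependency of kubeadm in the next section), the CRI endpoint can be exercised directly to confirm cri-dockerd answers on its socket (a sketch):

# Query runtime status and version through the CRI socket
crictl --runtime-endpoint unix:///var/run/cri-dockerd.sock info
crictl --runtime-endpoint unix:///var/run/cri-dockerd.sock version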

Install kubeadm

# Configure the Aliyun Kubernetes repository
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Install the latest kubelet, kubeadm and kubectl
yum install -y kubelet kubeadm kubectl
# Enable kubelet at boot
systemctl enable kubelet.service
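Note that this installs whatever version is newest in the repository, while this guide targets v1.25.0. If the repo has since moved on, pinning the versions keeps the install reproducible (a sketch; the exact package revision may differ):

yum install -y kubelet-1.25.0 kubeadm-1.25.0 kubectl-1.25.0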

Initialize the master node
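As the init output below notes, the control-plane images can be pulled in advance with 'kubeadm config images pull' to speed up init (a sketch using the same repository and CRI socket as the init command; the unix:// scheme avoids the deprecation warning shown below):

kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers --kubernetes-version=v1.25.0 --cri-socket unix:///var/run/cri-dockerd.sock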

# Run on the master node
[root@k8s-master01 ~]# kubeadm init --image-repository registry.aliyuncs.com/google_containers --kubernetes-version=v1.25.0 --pod-network-cidr=10.244.0.0/16 --upload-certs --cri-socket /var/run/cri-dockerd.sock
W0902 22:40:37.288467   10355 initconfiguration.go:119] Usage of CRI endpoints without URL scheme is deprecated and can cause kubelet errors in the future. Automatically prepending scheme "unix" to the "criSocket" with value "/var/run/cri-dockerd.sock". Please update your configuration!
[init] Using Kubernetes version: v1.25.0
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master01 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.142.130]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master01 localhost] and IPs [192.168.142.130 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master01 localhost] and IPs [192.168.142.130 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 8.003126 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
6294eecda769e52406b6535c2b9a7d2be1026df4ec2d9d656e42071ee29fd493
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master01 as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: 0tmwhc.waj7kbvh1gef893f
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.142.130:6443 --token 0tmwhc.waj7kbvh1gef893f \
	--discovery-token-ca-cert-hash sha256:938af8723c171cf1fcb8d040573536aab86d1882311c84875a2e2a2416097929
	
# Create the kubeconfig file as instructed by the output
[root@k8s-master01 ~]# mkdir -p $HOME/.kube
[root@k8s-master01 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master01 ~]# chown $(id -u):$(id -g) $HOME/.kube/config

You can then join the other nodes as prompted. Run the join command on each worker node, making sure to append the --cri-socket /var/run/cri-dockerd.sock flag (see the node join section below).

Deploy the network

# Right after init, the node status is NotReady
[root@k8s-master01 ~]# kubectl get node
NAME           STATUS     ROLES           AGE   VERSION
k8s-master01   NotReady   control-plane   18m   v1.25.0

# Download the flannel manifest
[root@k8s-master01 ~]# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# Create the flannel resources
[root@k8s-master01 ~]# kubectl create -f kube-flannel.yml 
namespace/kube-flannel created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created

[root@k8s-master01 ~]# kubectl get pod -n kube-flannel
NAME                    READY   STATUS    RESTARTS   AGE
kube-flannel-ds-fbnsg   1/1     Running   0          114s
# Check again; the node status is now Ready
[root@k8s-master01 ~]# kubectl get node
NAME           STATUS   ROLES           AGE   VERSION
k8s-master01   Ready    control-plane   26m   v1.25.0
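flannel defaults to Network 10.244.0.0/16, which is why the init command earlier passed --pod-network-cidr=10.244.0.0/16; with a different pod CIDR you would have to edit net-conf.json in kube-flannel.yml before creating it. To double-check what the running flannel is actually using (a sketch):

# Print flannel's net-conf.json from its ConfigMap
kubectl -n kube-flannel get cm kube-flannel-cfg -o jsonpath='{.data.net-conf\.json}'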

Join the worker nodes to the cluster

# Run the following on node01 and node02 to join the cluster. If the token has expired, run: kubeadm token create --ttl 0 --print-join-command  to generate a new join command
kubeadm join 192.168.142.130:6443 --token 0tmwhc.waj7kbvh1gef893f \
	--discovery-token-ca-cert-hash sha256:938af8723c171cf1fcb8d040573536aab86d1882311c84875a2e2a2416097929 \
	--cri-socket /var/run/cri-dockerd.sock
	
# If the command hangs for a long time and finally fails with: dial tcp 192.168.142.130:6443: connect: no route to host
# flush the firewall rules by running the following on the master, then join again
systemctl stop kubelet
systemctl stop docker
iptables --flush
iptables -t nat --flush
systemctl start kubelet
systemctl start docker

# List the nodes
[root@k8s-master01 ~]# kubectl get node 
NAME           STATUS   ROLES           AGE     VERSION
k8s-master01   Ready    control-plane   65m     v1.25.0
k8s-node01     Ready    <none>          5m24s   v1.25.0
k8s-node02     Ready    <none>          5m19s   v1.25.0
# List the namespaces
[root@k8s-master01 ~]# kubectl get ns
NAME              STATUS   AGE
default           Active   78m
kube-flannel      Active   53m
kube-node-lease   Active   78m
kube-public       Active   78m
kube-system       Active   78m
# List the pods in a given namespace
[root@k8s-master01 ~]# kubectl get pod -n kube-system
NAME                                   READY   STATUS    RESTARTS      AGE
coredns-c676cc86f-76q4q                1/1     Running   1 (18m ago)   77m
coredns-c676cc86f-hdfjq                1/1     Running   1 (18m ago)   77m
etcd-k8s-master01                      1/1     Running   1 (18m ago)   77m
kube-apiserver-k8s-master01            1/1     Running   1 (18m ago)   77m
kube-controller-manager-k8s-master01   1/1     Running   1 (18m ago)   77m
kube-proxy-2zlfk                       1/1     Running   0             17m
kube-proxy-42tvp                       1/1     Running   1 (18m ago)   77m
kube-proxy-qglsr                       1/1     Running   0             17m
kube-scheduler-k8s-master01            1/1     Running   1 (18m ago)   77m
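The ROLES column for the workers shows <none>; that is only a missing label, not an error. If you prefer them to read as workers (purely cosmetic, a sketch):

kubectl label node k8s-node01 node-role.kubernetes.io/worker=
kubectl label node k8s-node02 node-role.kubernetes.io/worker=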