Basic Environment Setup
Servers: CentOS 7.7 (2019 release)
1. Plan the network environment (IP, hostname, vCPUs, RAM):
192.168.7.20 k8s-master.linux.com 2U 4G
192.168.7.21 k8s-node01.linux.com 2U 8G
192.168.7.22 k8s-node02.linux.com 2U 8G
!!! NOTE: the following steps must be executed on all three machines.
2. Configure hostname resolution for the master and the nodes
┌─[k8s-master]─[~]
└──╼ cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.7.20 k8s-master.linux.com
192.168.7.21 k8s-node01.linux.com
192.168.7.22 k8s-node02.linux.com
3. Configure passwordless SSH login between all nodes
ssh-keygen
ssh-copy-id localhost
scp -r /root/.ssh/ root@192.168.7.21:/root/
scp -r /root/.ssh/ root@192.168.7.22:/root/
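Copying the whole /root/.ssh directory gives every node the same key pair and authorized_keys. A quick check from the master that passwordless login actually works (BatchMode makes ssh fail instead of prompting for a password):
for h in 192.168.7.21 192.168.7.22; do ssh -o BatchMode=yes root@$h hostname; done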
4. Synchronize time on all nodes
┌─[k8s-master]─[~]
└──╼ crontab -l
*/30 * * * * /usr/sbin/ntpdate 120.25.115.20 &> /dev/null
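To spot-check that the NTP server is reachable without touching the clock, ntpdate's query mode can be used:
/usr/sbin/ntpdate -q 120.25.115.20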
5. Disable swap
swapoff -a
sed -ri '/swap/d' /etc/fstab
Verify:
┌─[k8s-master]─[~]
└──╼ free -m
total used free shared buff/cache available
Mem: 1819 706 74 11 1038 937
Swap: 0 0 0
6. Tune kernel parameters
┌─[k8s-master]─[~]
└──╼ cat /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2130720
┌─[k8s-master]─[~]
└──╼ sysctl -p /etc/sysctl.d/k8s.conf
Load the required kernel modules. Note that the net.bridge.* keys above only exist once br_netfilter is loaded, so if sysctl -p reported errors, re-run it after this:
modprobe br_netfilter
modprobe nf_conntrack    # on CentOS 7 the module is nf_conntrack (ip_conntrack is only a legacy alias)
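These modprobe calls do not survive a reboot. A minimal sketch for making them persistent, assuming the standard systemd-modules-load mechanism that reads /etc/modules-load.d/:
cat > /etc/modules-load.d/k8s.conf <<EOF
br_netfilter
nf_conntrack
EOF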
Install the software dependencies:
yum install -y wget jq psmisc net-tools nfs-utils socat telnet device-mapper-persistent-data lvm2 git tar zip curl conntrack ipvsadm ipset iptables sysstat libseccomp
Enable IPVS forwarding by creating a module-load script:
┌─[k8s-master]─[~]
└──╼ cat /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
#
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
# After saving the file (:wq), execute it:
bash /etc/sysconfig/modules/ipvs.modules
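Verify that the IPVS modules are actually loaded:
lsmod | grep -E 'ip_vs|nf_conntrack'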
Configure the Aliyun yum repository for Docker
# Step 1: Install the required system tools
yum install -y yum-utils device-mapper-persistent-data lvm2
# Step 2: Add the repository definition
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Step 3: Point the repository at the Aliyun mirror
sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
# Step 4: Refresh the cache and install Docker CE
yum makecache fast
yum -y install docker-ce
# Step 5: Start and enable the Docker service
systemctl enable --now docker
Configure registry mirrors to speed up image pulls:
┌─[k8s-master]─[~]
└──╼ cat /etc/docker/daemon.json
{
"registry-mirrors": ["https://docker.mirrors.ustc.edu.cn","https://registry.cn-hangzhou.aliyuncs.com"]
}
systemctl restart docker
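Confirm the mirrors took effect:
docker info | grep -A 2 'Registry Mirrors'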
Install containerd.io
yum install containerd.io -y
# Generate the default configuration file
containerd config default > /etc/containerd/config.toml
Edit config.toml; only two settings need to change:
# 1) Enable the systemd cgroup driver (under [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options])
SystemdCgroup = true
# 2) Point the sandbox (pause) image at a domestic mirror (under [plugins."io.containerd.grpc.v1.cri"])
sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.9"
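If you prefer not to edit config.toml by hand, the same two changes can be scripted with sed (a sketch against the default file generated above; double-check the result before starting containerd):
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
sed -i 's#sandbox_image = ".*"#sandbox_image = "registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.9"#' /etc/containerd/config.toml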
systemctl enable --now containerd
Install the kubeadm, kubelet, and kubectl tools
# Configure the yum repository
┌─[k8s-master]─[~]
└──╼ cat /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes-new/core/stable/v1.29/rpm/
enabled=1
gpgcheck=0
# The version numbers must match exactly
yum install -y kubeadm-1.29.1 kubectl-1.29.1 kubelet-1.29.1
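Confirm the expected versions were installed:
kubeadm version -o short
kubectl version --client
kubelet --version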
┌─[k8s-master]─[~]
└──╼ cat /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
# --cgroup-driver=systemd makes the kubelet use systemd as its cgroup driver, matching the containerd setting above
systemctl enable --now kubelet    # the kubelet will crash-loop until kubeadm init/join runs; this is expected
Configure the crictl tool to talk to containerd's socket:
┌─[k8s-master]─[~]
└──╼ cat /etc/crictl.yaml
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 10
debug: false
systemctl restart containerd
Check that crictl works:
┌─[k8s-master]─[~]
└──╼ crictl images
IMAGE TAG IMAGE ID SIZE
# If this errors, the YAML above is probably malformed; also check /var/log/messages for details
Generate the cluster initialization file on the master (this step runs on the master only). Use the file below as a reference: imageRepository must be changed to the Aliyun mirror, and every field with a # comment must be adapted to your environment.
kubeadm config print init-defaults > init-new.yaml
┌─[k8s-master]─[~]
└──╼ cat init-new.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.7.20    # address the API server advertises on
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/containerd/containerd.sock
  imagePullPolicy: IfNotPresent
  name: k8s-master.linux.com        # name of the master node
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.29.1           # cluster version
networking:
  dnsDomain: cluster.local
  podSubnet: 10.88.0.0/16           # pod network CIDR
  serviceSubnet: 10.96.0.0/16       # service network CIDR
scheduler: {}
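Optionally, pre-pull the control-plane images before initializing; this surfaces registry problems early and makes init itself faster:
kubeadm config images pull --config=init-new.yaml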
Initialize the cluster
kubeadm init --config=init-new.yaml --ignore-preflight-errors=SystemVerification
If initialization succeeds, kubeadm prints output like the following (note the kubeadm join command at the end; it is what you use to add the worker nodes):
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.7.20:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:b2b8ae267148f8964a0bc00838a272bcaab5198d1c3580e57246c4ec05ea13d1
Set up the kubeconfig so the kubectl client can authenticate (on the master):
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> /etc/profile
source /etc/profile
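Verify that kubectl can reach the API server:
kubectl cluster-info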
Add the worker nodes (run on both worker nodes). The join command below is printed at the end of the init output; copy and paste it directly.
kubeadm join 192.168.7.20:6443 --token abcdef.0123456789abcdef \
  --discovery-token-ca-cert-hash sha256:b2b8ae267148f8964a0bc00838a272bcaab5198d1c3580e57246c4ec05ea13d1
To add nodes later:
1. Get a join command: on the master, run kubeadm token list to see existing tokens (they expire, ttl is 24h by default); note that kubeadm join also needs the CA certificate hash, not just the token (see the one-liner below).
2. Join: on the new node, run kubeadm join <master IP>:<port> --token <token> --discovery-token-ca-cert-hash sha256:<hash>.
3. Verify: run kubectl get nodes to confirm the new node has joined and eventually shows Ready.
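In practice the simplest way to obtain a complete join command, token plus CA cert hash, is to generate a fresh one on the master:
kubeadm token create --print-join-command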
Verify:
kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master.linux.com NotReady control-plane 4m8s v1.29.1
k8s-node01.linux.com NotReady <none> 47s v1.29.1
k8s-node02.linux.com NotReady <none> 43s v1.29.1
STATUS is NotReady because there is no pod network yet; deploy one next.
Two network options follow; pick ONE.
Option 1: Deploy the flannel network (suited to small clusters)
# Download the flannel manifest
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
Edit one setting in kube-flannel.yml: in the net-conf.json block, change "Network" to the pod CIDR used at kubeadm init (10.88.0.0/16 here):
  net-conf.json: |
    {
      "Network": "10.88.0.0/16",
      "EnableNFTables": false,
      "Backend": {
        "Type": "vxlan"
      }
    }
# Apply the manifest
kubectl apply -f kube-flannel.yml
Wait a little for the pods to come up.
kubectl get pod -A    # check progress
Option 2: Deploy the calico network (BGP-based; suited to large clusters)
yum install -y git
git clone https://gitee.com/wangxudong0808/calico.git
cd calico
mv calico.yaml /root    # move the manifest to /root for editing
[root@k8s-master ~]# vim calico.yaml
- name: CALICO_IPV4POOL_CIDR
  value: "10.88.0.0/16"    # must match the podSubnet in init-new.yaml
kubectl create -f calico.yaml
Test: check that every pod's STATUS is Running:
┌─[k8s-master]─[~]
└──╼ kubectl get pod -A
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-flannel kube-flannel-ds-k8sb5 1/1 Running 0 4h19m
kube-flannel kube-flannel-ds-kk5fn 1/1 Running 0 4h19m
kube-flannel kube-flannel-ds-n6w55 1/1 Running 0 4h19m
kube-system coredns-5f98f8d567-drb6w 1/1 Running 0 4h51m
kube-system coredns-5f98f8d567-q9h94 1/1 Running 0 4h51m
kube-system etcd-k8s-master.linux.com 1/1 Running 0 4h52m
kube-system kube-apiserver-k8s-master.linux.com 1/1 Running 0 4h52m
kube-system kube-controller-manager-k8s-master.linux.com 1/1 Running 0 4h52m
kube-system kube-proxy-xcfk5 1/1 Running 0 4h51m
kube-system kube-proxy-xrrxc 1/1 Running 0 4h35m
kube-system kube-proxy-z8cls 1/1 Running 0 4h34m
kube-system kube-scheduler-k8s-master.linux.com 1/1 Running 0 4h52m
Then check that every node's STATUS is Ready:
┌─[k8s-master]─[~]
└──╼ kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master.linux.com Ready control-plane 4h53m v1.29.1
k8s-node01.linux.com Ready <none> 4h36m v1.29.1
k8s-node02.linux.com Ready <none> 4h36m v1.29.1
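As a final smoke test, you can schedule a throwaway pod and then clean it up (busybox:1.36 is just an illustrative image here; pulls from Docker Hub may be slow on mainland networks):
kubectl run smoke-test --image=busybox:1.36 --restart=Never -- sleep 300
kubectl get pod smoke-test -o wide    # should reach Running on one of the workers
kubectl delete pod smoke-test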