安装依赖
# Install base utilities: curl/wget for downloads, lrzsz for terminal
# file transfer, bash-completion for the CLI completion set up later.
yum install -y \
curl \
wget \
systemd \
bash-completion \
lrzsz
安装前检查
1.同步服务器时间
# Set timezone to Asia/Shanghai and keep the hardware clock in UTC,
# then restart syslog and cron so their timestamps use the new timezone.
timedatectl set-timezone Asia/Shanghai && timedatectl set-local-rtc 0
systemctl restart rsyslog
systemctl restart crond
2.修改主机名
# Master node
hostnamectl set-hostname k8s-master
# Worker nodes (run the matching line on each worker, not all on one host)
hostnamectl set-hostname k8s-node1
hostnamectl set-hostname k8s-node2
3.修改hosts
# Append the cluster name/IP entries to /etc/hosts.
# Fixed: the original used '>' which overwrites the whole file and
# discards the default "127.0.0.1 localhost" entries, breaking local
# name resolution. Append with '>>' instead.
cat >>/etc/hosts <<EOF
192.168.152.24 k8s-master
192.168.152.25 k8s-node1
192.168.152.26 k8s-node2
EOF
4.关闭防火墙
systemctl disable firewalld.service && systemctl stop firewalld.service
桥接配置
# Load the kernel modules container networking needs on every boot.
cat >/etc/modules-load.d/k8s.conf <<EOF
overlay
br_netfilter
EOF
# Load them immediately for the current session.
modprobe overlay
modprobe br_netfilter
# Make iptables see bridged traffic and enable IPv4 forwarding.
cat >/etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
EOF
sysctl --system
# Confirm the br_netfilter and overlay modules are loaded.
# (fixed: 'egrep' is deprecated; use 'grep -E')
lsmod | grep -E 'overlay|br_netfilter'
# Confirm the bridge-nf-call and ip_forward sysctls are set to 1.
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward
安装docker
yum install -y yum-utils
# Point yum at the Aliyun mirror of the docker-ce repository.
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
mkdir -p /etc/docker
# Configure registry mirrors, json-file log rotation, the overlay2
# storage driver, and the systemd cgroup driver (must match the
# cgroup driver kubelet is configured with later).
cat >/etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2",
"storage-opts": [
"overlay2.override_kernel_check=true"
],
"registry-mirrors":["https://hub-mirror.c.163.com","https://docker.mirrors.ustc.edu.cn","https://registry.docker-cn.com"]
}
EOF
yum makecache fast
# Pin Docker 20.10.x — a version compatible with Kubernetes 1.23.
yum install -y docker-ce-20.10.23 docker-ce-cli-20.10.23 containerd.io
systemctl daemon-reload
systemctl enable docker && systemctl restart docker
安装k8s
1.关闭swap分区或者禁用swap文件
swapoff -a && sed -ri 's/.*swap.*/#&/' /etc/fstab
2.关闭selinux
setenforce 0 && sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
3.安装并运行
# Use the Aliyun Kubernetes yum repository (GPG checks disabled).
cat >/etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Install binaries.
# CNI plugins. Fixed: the original jammed mkdir and tar onto one line
# with no command separator, so the whole thing was a single broken
# mkdir invocation.
sudo mkdir -p /opt/cni/bin
sudo tar -xzvf cni-plugins-linux-amd64-v0.8.2.tgz -C /opt/cni/bin
# crictl
sudo tar -xzvf crictl-v1.22.0-linux-amd64.tar.gz -C /usr/bin
# Install kubelet, kubeadm and kubectl (pinned to 1.23.17).
yum install -y kubelet-1.23.17 kubeadm-1.23.17 kubectl-1.23.17 --disableexcludes=kubernetes
# Set the kubelet cgroup driver to systemd (must match Docker's).
cat >/etc/sysconfig/kubelet <<EOF
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
EOF
# Install the kubelet systemd unit.
sudo cp kubelet.service /etc/systemd/system/
# Copy the kubeadm drop-in config. Fixed: same missing-separator bug
# as above (mkdir and cp were on one line).
sudo mkdir -p /etc/systemd/system/kubelet.service.d
sudo cp 10-kubeadm.conf /etc/systemd/system/kubelet.service.d/
# Enable and start kubelet.
sudo systemctl enable --now kubelet
# Check kubelet status.
systemctl status kubelet
# If it fails, inspect the journal.
journalctl -xe
# Print the default init configuration.
# Fixed: the original contained a garbled character: 'init¬-defaults'.
sudo kubeadm config print init-defaults
# 参考 (references):
# https://blog.csdn.net/wuxingge/article/details/119462915
# https://www.cnblogs.com/leleyao/p/16212565.html
# kubeadm configuration file (kubeadm.yaml).
# Fixed: the original paste had lost all YAML indentation, making the
# document invalid; canonical v1beta3 nesting restored.
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: "192.168.152.24"
  bindPort: 6443
nodeRegistration:
  taints:
    - effect: PreferNoSchedule
      key: node-role.kubernetes.io/master
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: v1.23.0
networking:
  podSubnet: "172.16.0.0/12,fc00::/48"
  serviceSubnet: "10.96.0.0/12,fd00::/108"
# Initialize the cluster from the configuration file above.
kubeadm init --config=kubeadm.yaml
#报错的话
1.报错找不到主机名称,配置hosts文件
2.yum -y install socat conntrack-tools
3.kubelet报不能用fe80是因为kubeadm中nodeip的配置
#如果初始化失败,通过kubeadm reset进行重设
安装网络calico
mkdir -p /k8sdata/network/
wget --no-check-certificate -O /k8sdata/network/calico.yml https://docs.projectcalico.org/manifests/calico.yaml
kubectl create -f /k8sdata/network/calico.yml
# Download the calico manifest.
# NOTE(review): this second download/apply (pinned v3.14) duplicates the
# flow above — presumably only one of the two is needed; verify which.
wget https://docs.projectcalico.org/v3.14/manifests/calico.yaml --no-check-certificate
# Edit the manifest to set the pod network CIDR.
vim calico.yaml
# CALICO_IPV4POOL_CIDR must match the kubeadm init pod-network-cidr value.
## Uncomment these two lines in the manifest:
- name: CALICO_IPV4POOL_CIDR
value: "10.122.0.0/16"
kubectl apply -f calico.yaml
# Check that the calico/coredns pods come up.
kubectl get pods -n kube-system
[root@ricardo-1 k8s]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
ricardo-1 Ready master 43m v1.18.0
ricardo-2 Ready <none> 41m v1.18.0
ricardo-3 Ready <none> 41m v1.18.0
1.找不到本机ipv6地址
修改calico中的ipv6 autodetect为IP地址
2.coredns解析不了地址
iptables -P FORWARD ACCEPT
设置k8s命令行补全
# Idempotently enable shell completion for kubectl/kubeadm/crictl.
# Fixed: the first guard originally grepped for "kubectl" instead of
# "bash_completion", so whether the bash_completion line got added
# depended on an unrelated string being present in .bashrc.
! grep -q bash_completion "$HOME/.bashrc" && echo "source /usr/share/bash-completion/bash_completion" >>"$HOME/.bashrc"
! grep -q kubectl "$HOME/.bashrc" && echo "source <(kubectl completion bash)" >>"$HOME/.bashrc"
! grep -q kubeadm "$HOME/.bashrc" && echo "source <(kubeadm completion bash)" >>"$HOME/.bashrc"
! grep -q crictl "$HOME/.bashrc" && echo "source <(crictl completion bash)" >>"$HOME/.bashrc"
source "$HOME/.bashrc"
k8s常用命令
# List nodes
kubectl get nodes
# List pods
kubectl get pods -A
kubectl get pods -n kube-system
kubectl get pod -n kube-system -o wide
# Pod status / logs (example pod name — substitute your own)
kubectl describe pod coredns-57d4cbf879-xgk2f -n kube-system
kubectl logs -f coredns-57d4cbf879-xgk2f -n kube-system
# kubelet service status
systemctl status kubelet
# (removed invalid "systemctl status kubelet.service --now": the --now
# flag only applies to enable/disable/mask, not status)
# kubelet logs (node-level)
journalctl -f -u kubelet
journalctl -u kubelet
# Enable on boot
systemctl enable kubelet
# Restart
systemctl restart kubelet
# Status
systemctl status kubelet
# List namespaces
kubectl get ns
# Create a namespace
kubectl create namespace dev
# Reset the node
kubeadm reset
# Clean up after reset ($HOME quoted for safety)
rm -rf "$HOME/.kube"
# kubeadm stores its config in the kubeadm-config ConfigMap;
# show the pod network settings
kubectl -n kube-system describe cm kubeadm-config |grep -i pod
# show pod and service network settings
kubectl -n kube-system describe cm kubeadm-config |grep -i net
将 node 节点加入 master 所在集群(该命令在工作节点 node 中执行)。
# Join a worker node to the cluster (token/hash come from kubeadm init output).
kubeadm join 192.168.1.220:6443 --token 88gcei.gmh12c7jmh6ksgj6 \
--discovery-token-ca-cert-hash sha256:8a3f8919183a5ff0fe8626615195a044a74b0ea3a004e41c013095d28eea83dc
# Tokens expire (24h by default); regenerate the full join command with:
kubeadm token create --print-join-command
安装node节点 不需要拷贝证书
需要kube-proxy(集群内部服务发现和负载均衡)和calico/cni 和 calico/node和pause