# Deploying Kubernetes with kubeadm
# Reference: http://k8s.unixhot.com/kubernetes/kubeadm-install.html#test
# ---- 1. Initialize the environment on ALL nodes ----
# Set a unique hostname per node.
hostnamectl set-hostname k8s-test
# Add the hostname/IP mappings of every cluster node to /etc/hosts.

# The kubelet requires swap to be off; disable it now and persistently.
swapoff -a
# Comment out the swap entry in /etc/fstab so it stays off after reboot.
sed -ri 's/^([^#].*[[:space:]]swap[[:space:]].*)$/#\1/' /etc/fstab

# Stop and disable the firewall (lab setup; in production open the
# required ports instead of disabling firewalld).
systemctl stop firewalld
systemctl disable firewalld

# Put SELinux into permissive mode now and across reboots.
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

# Install Docker CE from the Aliyun mirror.
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
yum install docker-ce docker-ce-cli -y

# Configure the Docker daemon: systemd cgroup driver (must match the
# kubelet), registry mirror, bounded json-file logs, overlay2 storage.
mkdir -p /etc/docker
cat <<'EOF' > /etc/docker/daemon.json
{
"registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"],
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2",
"storage-opts": [
"overlay2.override_kernel_check=true"
]
}
EOF
systemctl enable --now docker && systemctl status docker
# Configure the Aliyun Kubernetes yum repository (quoted delimiter:
# nothing in the body needs shell expansion).
tee /etc/yum.repos.d/kubernetes.repo > /dev/null <<'EOF'
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# NOTE: the mirror cannot sync GPG metadata upstream, so repo index
# signature checks may fail; --nogpgcheck works around that.
# Pin kubelet/kubeadm/kubectl to a version compatible with the container
# runtime. Do NOT run an unpinned 'yum install kubelet kubeadm kubectl'
# first - it pulls the latest version and then conflicts with the pin.
yum install -y --nogpgcheck kubelet-1.23.6 kubeadm-1.23.6 kubectl-1.23.6

# The kubelet systemd drop-in only reads KUBELET_EXTRA_ARGS from
# /etc/sysconfig/kubelet, so all extra flags must go into that one
# variable (a separate KUBELET_CGROUP_ARGS line would be silently ignored).
cat <<'EOF' | tee /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd --fail-swap-on=false"
EOF
# Load the kernel modules container networking needs, now and at boot.
printf '%s\n' overlay br_netfilter | sudo tee /etc/modules-load.d/k8s.conf
for mod in overlay br_netfilter; do
  modprobe "$mod"
done

# Let bridged traffic traverse iptables and enable IP forwarding.
sudo tee /etc/sysctl.d/k8s.conf <<'EOF'
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system

# Verify the modules are loaded and the sysctls took effect.
lsmod | grep br_netfilter
lsmod | grep overlay
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward

# Enable the kubelet now; it crash-loops until kubeadm init/join
# provides its configuration - that is expected.
systemctl enable --now kubelet && systemctl status kubelet
# kube-proxy provides the in-cluster load balancing. It defaults to
# iptables mode; IPVS is recommended for production. Enable the IPVS
# kernel modules on ALL nodes.
cat <<'EOF' | tee /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
# nf_conntrack_ipv4 was renamed to nf_conntrack in kernel >= 4.19;
# try the old name first (CentOS 7 / 3.10), fall back to the new one.
modprobe -- nf_conntrack_ipv4 || modprobe -- nf_conntrack
EOF
chmod +x /etc/sysconfig/modules/ipvs.modules
source /etc/sysconfig/modules/ipvs.modules
# 'nf_conntrack' matches both the old and the new module name.
lsmod | grep -e ip_vs -e nf_conntrack
# ---- 2. Initialize the cluster control plane (master) ----
# Generate a default kubeadm configuration to edit before 'kubeadm init'.
kubeadm config print init-defaults > kubeadm.yaml
cat kubeadm.yaml
# Edited kubeadm.yaml (the paste above lost its YAML indentation; this is
# the properly indented form the file must actually have).
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.36.27   # change to the API server (master) address
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  imagePullPolicy: IfNotPresent
  name: k8smaster.bjkd.com          # change to the master's hostname
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
# NOTE: v1beta3 dropped the dns.type field (CoreDNS is the only supported
# DNS); 'type: CoreDNS' here would be rejected as an unknown field.
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers   # use the Aliyun image mirror
kind: ClusterConfiguration
kubernetesVersion: 1.23.6           # set to the exact version being installed
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/16       # Service network
  podSubnet: 10.97.0.0/16           # added: Pod network (must match the CNI config)
scheduler: {}
---
# Added: make kube-proxy use IPVS (LVS) mode.
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
### Run the initialization:
kubeadm init --config kubeadm.yaml --v=5
# If it fails: fix the reported problem, run 'kubeadm reset', then retry.
# Expected output on success:
#   Your Kubernetes control-plane has initialized successfully!
#   To start using your cluster, you need to run the following as a regular user:
#     mkdir -p $HOME/.kube
#     sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
#     sudo chown $(id -u):$(id -g) $HOME/.kube/config
#   Alternatively, if you are the root user, you can run:
#     export KUBECONFIG=/etc/kubernetes/admin.conf
#   You should now deploy a pod network to the cluster.
#   Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
#     https://kubernetes.io/docs/concepts/cluster-administration/addons/
#   Then you can join any number of worker nodes by running the following on each as root:
#     kubeadm join 192.168.36.27:6443 --token abcdef.0123456789abcdef \
#       --discovery-token-ca-cert-hash sha256:c8083db02e35df35fbf31a3e5f361d7efdcb323d8ef3210016695bbb1ffa7e79
### Check component status.
# NOTE(review): 'kubectl get cs' (componentstatuses) is deprecated since
# v1.19 and may print a warning; the pod listing below is the more
# reliable health check.
kubectl get cs
kubectl get pod --all-namespaces
# ---- 3. Install the network plugin (flannel) ----
yum install git -y
# Either clone the repo (flannel moved from coreos/flannel to
# flannel-io/flannel; the old repo is archived) ...
git clone --depth 1 https://github.com/flannel-io/flannel.git
cd flannel/Documentation/
# ... or download the manifest directly:
#   wget https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
#   https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
vi kube-flannel.yml
# Change "Network": "10.244.0.0/16" to "Network": "10.97.0.0/16" so it
# matches the podSubnet configured in kubeadm.yaml.
# 'apply' alone is enough (running 'create' first on the same manifest
# just errors with AlreadyExists).
kubectl apply -f kube-flannel.yml
# Verify the node's flannel lease; expected output:
#   FLANNEL_NETWORK=10.97.0.0/16
#   FLANNEL_SUBNET=10.97.1.1/24
#   FLANNEL_MTU=1450
#   FLANNEL_IPMASQ=true
cat /run/flannel/subnet.env
kubectl get pod -n kube-system
kubectl get node
# Generate a fresh join command for the worker nodes.
kubeadm token create --print-join-command
#### Copy the printed join command and run it on each worker node.
# ---- 4. Worker node setup ----
kubeadm join 192.168.36.27:6443 --token wbzuol.14byxhxh98rt28pe --discovery-token-ca-cert-hash sha256:c8083db02e35df35fbf31a3e5f361d7efdcb323d8ef3210016695bbb1ffa7e79
#### Recovering a broken node: reset it and clean up CNI/iptables state.
kubeadm reset
rm -rf /var/lib/cni/
systemctl daemon-reload
systemctl restart kubelet
# WARNING: this flushes ALL iptables rules, not just Kubernetes ones.
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
# ---- 5. Status checks and a smoke test ----
kubectl get nodes
kubectl get daemonset --all-namespaces
kubectl get pod --all-namespaces
kubectl get nodes --show-labels
kubectl get pod -n kube-system
kubectl get pod --all-namespaces -o wide
kubectl -n kube-system rollout restart deployment coredns

# Create a single-Pod nginx application.
kubectl create deployment nginx --image=nginx:alpine
kubectl get pod -o wide
# Curl the Pod IP reported above (example address from the 10.97.0.0/16
# pod network - substitute the real one).
curl --head http://10.97.0.2

# Expose nginx with a Service: a ClusterIP is allocated from the
# --service-cidr (10.96.0.0/16) range. For external access via a port
# mapped on every node, add --type=NodePort (not done below).
kubectl expose deployment nginx --port=80
kubectl get service