操作系统:CentOS
第一步,执行环境配置脚本(所有节点执行)
执行脚本时添加hostname参数。
#!/bin/bash
# Step 1: per-node environment preparation (run on every node).
# Usage: ./env_setup.sh <hostname>
#   $1 - this node's name inside the k8s cluster.

if [ $# -ge 2 ]; then
  echo "参数个数过多,请确认是否输入错误"
  exit 1   # non-zero status so callers/automation can detect the failure
elif [ $# -le 0 ]; then
  echo "无参数 ,请确认是否输入错误"
  exit 1
else
  # $1 is the node's name in the k8s cluster; quoted to survive odd input.
  hostnamectl set-hostname "$1"
fi

# Permanently disable SELinux (effective after reboot) ...
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
# ... and switch to permissive for the current boot too, so kubeadm preflight
# passes without a reboot. `|| true` keeps the script going if SELinux is
# already disabled (setenforce then returns non-zero).
setenforce 0 || true

# Comment out swap entries in /etc/fstab (permanent across reboots) ...
sed -i '/swap/s/^/#/' /etc/fstab
# ... and turn swap off right now: kubelet refuses to run with swap active.
swapoff -a

# Install expect; used later to script the scp password prompt (step 5).
yum -y install expect
第二步,安装、配置、启动docker脚本(所有节点执行)
#!/bin/bash
# Step 2: install and configure Docker plus the kubernetes packages
# (run on every node).

yum -y install ca-certificates curl

# Install docker via the official convenience script, using the Aliyun mirror.
curl -fsSL https://get.docker.com | bash -s docker --mirror Aliyun

# Start docker and enable it on boot.
systemctl enable docker
systemctl start docker

## Switch docker's cgroup driver to systemd (what kubeadm/kubelet expect) by
## adding "exec-opts": ["native.cgroupdriver=systemd"] to daemon.json.
# Create the config directory if it does not exist.
# FIX: the original test was `[ ! -d “/etc”/docker ]` with full-width Chinese
# quotes, so it checked a literal path named “/etc”/docker and never matched.
if [ ! -d /etc/docker ]; then
  mkdir -p /etc/docker
fi

echo -e "修改/etc/docker/daemon.json ... \n"
read -r -s -n1 -p "按任意键继续 ... "
daemon="/etc/docker/daemon.json"
flag=0
if [ -f "$daemon" ]; then
  # grep -c counts matching lines; non-zero means the option is already there.
  if [ "$(grep -c "exec-opts" "$daemon")" -ne 0 ]; then
    echo -e "\n/etc/docker/daemon.json已经native.cgroupdriver=systemd,无需修改 ... \n"
    flag=1
  fi
fi
if [ "$flag" -eq 0 ]; then
  # Deliberately interactive: daemon.json may already contain other settings
  # that an automated overwrite would destroy.
  sudo vim /etc/docker/daemon.json
  echo -e "\n修改/etc/docker/daemon.json完成 ... \n"
fi

# Reload unit files first, then restart docker so the new cgroup driver
# takes effect. (FIX: removed the premature `systemctl restart kubelet` that
# ran before kubelet is even installed below, and the redundant double
# restart of docker.)
systemctl daemon-reload
systemctl restart docker

# Verify the driver change took effect.
docker info | grep "Cgroup Driver"

# Let iptables see bridged traffic, as kubeadm's preflight checks require.
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# FIX: apply the sysctl settings now instead of waiting for a reboot.
sysctl --system

# Aliyun mirror of the kubernetes yum repository.
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum install -y kubelet-1.22.0 kubeadm-1.22.0 kubectl-1.22.0
systemctl enable kubelet && systemctl start kubelet
kubectl version
第三步,拉取k8s相关组件的镜像(master节点执行)
#!/bin/bash
# Step 3: pull the k8s control-plane images from a reachable mirror registry
# and re-tag them with the canonical names kubeadm expects (run on master).

k8sApiServer="cr-cn-beijing.volces.com/jike-bigdata/kube-apiserver:v1.22.0"
k8sConMang="cr-cn-beijing.volces.com/jike-bigdata/kube-controller-manager:v1.22.0"
k8sSchedu="cr-cn-beijing.volces.com/jike-bigdata/kube-scheduler:v1.22.0"
k8sProxy="cr-cn-beijing.volces.com/jike-bigdata/kube-proxy:v1.22.0"
etcd="cr-cn-beijing.volces.com/jike-bigdata/etcd:3.5.0-0"
pause="cr-cn-beijing.volces.com/jike-bigdata/pause:3.5"
# Images whose canonical name sits directly under k8s.gcr.io/.
# (FIX: dropped the trailing "${arr[@]}" — arr was never defined.)
nums0=("$k8sApiServer" "$k8sConMang" "$k8sSchedu" "$k8sProxy" "$etcd" "$pause")

## Special cases — their canonical names live under different registries:
# k8s.gcr.io/coredns/coredns:v1.8.4
# quay.io/coreos/flannel:v0.11.0-amd64
coredns=cr-cn-beijing.volces.com/jike-bigdata/coredns:v1.8.4
flannel=cr-cn-beijing.volces.com/jike-bigdata/flannel:v0.11.0-amd64
nums1=("$coredns" "$flannel")

## Pull every image (nums0 first, then the special cases).
for element in "${nums0[@]}" "${nums1[@]}"; do
  echo "--------------- 开始拉取 --------------> " "$element"
  docker pull "$element"
done

## Re-tag the mirror images to the k8s.gcr.io names kubeadm looks for.
for element in "${nums0[@]}"; do
  newEle=$(echo "$element" | sed 's@cr-cn-beijing.volces.com/jike-bigdata/@k8s.gcr.io/@g')
  echo "-----打tag-----从" "$element" "打成" "$newEle"
  docker tag "$element" "$newEle"
done

# coredns' canonical name carries an extra path segment (coredns/coredns).
e0=$(echo "$coredns" | sed 's@cr-cn-beijing.volces.com/jike-bigdata/@k8s.gcr.io/coredns/@g')
echo "-----打tag-----从" "$coredns" "打成" "$e0"
# FIX: the original ran `docker tag $element $e0`, reusing the stale loop
# variable — so the last image of the loop got mis-tagged and coredns never
# received its canonical name. Same bug for flannel below.
docker tag "$coredns" "$e0"

e1=$(echo "$flannel" | sed 's@cr-cn-beijing.volces.com/jike-bigdata/@quay.io/coreos/@g')
echo "-----打tag-----从" "$flannel" "打成" "$e1"
docker tag "$flannel" "$e1"
第四步,初始化master节点(master节点执行)
#!/bin/bash
# Step 4: initialize the control plane (run on the MASTER node only).
# If init misbehaves, `journalctl -fu kubelet.service` helps locate the cause.
############################################
# kubeadm init must run ONLY on the master, never on worker nodes!
kubeadm init --kubernetes-version=v1.22.0 --pod-network-cidr=10.244.0.0/16 --v=5 --ignore-preflight-errors=all

# On success, install the admin kubeconfig for kubectl.
# (Quoted "$HOME" so paths survive unusual home directories; `cp -i` will
# prompt before overwriting an existing config — intentional safety.)
mkdir -p "$HOME/.kube"
cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
chown "$(id -u):$(id -g)" "$HOME/.kube/config"
export KUBECONFIG=/etc/kubernetes/admin.conf

# Known issue: scheduler shows Unhealthy —
#   Get "http://127.0.0.1:10251/healthz": connection refused
# Remedy: edit (or comment out) the `port` setting in
# /etc/kubernetes/manifests/kube-controller-manager.yaml and
# kube-scheduler.yaml. Done interactively below.
echo -e "修改/etc/kubernetes/manifests/下的kube-controller-manager.yaml和kube-scheduler.yaml的端口 \n"
read -r -s -n1 -p "按任意键继续 ... "
vim /etc/kubernetes/manifests/kube-controller-manager.yaml
echo -e "\n修改kube-controller-manager.yaml完成 ... \n"
read -r -s -n1 -p "按任意键继续 ... "
vim /etc/kubernetes/manifests/kube-scheduler.yaml
echo -e "\n修改kube-scheduler.yaml完成 ... \n"

# kubelet keeps logging errors until `kubeadm init` runs; afterwards the
# remaining "NetworkReady=false ... NetworkPluginNotReady" error just means
# the flannel CNI plugin is missing — install it:
echo -e "安装kube-flannel开始 ... \n"
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
echo -e "安装kube-flannel完成 ... \n"
systemctl restart kubelet.service
############################################
第五步,将node节点加入到master中(node节点中执行)
#!/bin/bash
############################################
# Step 5: join this worker node to the cluster (run on each NODE).
# Prerequisite: the master is up and has printed its `kubeadm join ...` line.
# On the master: `kubeadm token list` shows tokens; if expired, regenerate:
#   kubeadm token create --ttl 0
# Regenerate the CA cert hash with:
#   openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
# Example:
#   kubeadm join 192.168.0.218:6443 --token h9z90m.ecxiykthjsgz7axt --discovery-token-ca-cert-hash sha256:52e57a8e... --v=5 --ignore-preflight-errors=all
############################################
echo -e "复制master节点输出的kubeadm join语句到node节点上执行"
# Mini REPL: accept only a pasted `kubeadm join ...` line (or "quit").
# -r preserves backslashes in the pasted command.
while read -r -p "kittyshell> " input; do
  if [[ $input =~ ^kubeadm.* ]]; then
    # Intentionally unquoted: word-splitting turns the pasted line into a
    # command plus its arguments.
    $input
    break
  elif [ "$input" == "quit" ]; then
    exit
  else
    echo "Unknown input: $input"
  fi
done
echo -e "kubeadm join完成 ... \n"

## Without admin.conf, kubectl on the node fails with
## "The connection to the server localhost:8080 was refused"
## (see https://developer.aliyun.com/article/652961) — copy it from master.
echo -e "复制master节点/etc/kubernetes/admin.conf到node节点相同目录下,开始拷贝 \n"
echo -e "请输入k8s集群master节点的IP地址 ... \n"
read -r -p "k8s-master-ip> " masterIP
# SECURITY: the root SSH password is hard-coded in plaintext below and is
# visible in this file and in `ps` output while expect runs. Prefer SSH key
# authentication (ssh-copy-id) and a plain scp instead — review before use.
expect -c "
spawn scp root@$masterIP:/etc/kubernetes/admin.conf /etc/kubernetes/admin.conf
expect {
\"*password\"
{
set timeout 300;
send \"jike@123\r\";
}
\"yes/no\"
{
send \"yes\r\"; exp_continue;}
}
expect eof
"
echo -e "复制master节点/etc/kubernetes/admin.conf到node节点,拷贝完成 \n"
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
source ~/.bash_profile
可安装nginx进行测试 ~~
# Smoke-test the cluster by deploying nginx.
# Generate a deployment manifest locally without touching the cluster.
# FIX: image name was misspelled "nigix" (pods would sit in
# ImagePullBackOff); bare --dry-run is deprecated — use --dry-run=client.
kubectl create deployment web --image=nginx -o yaml --dry-run=client > nginx.yaml
# Deploy from the generated manifest.
kubectl apply -f nginx.yaml
# Check deployment and pod status.
kubectl get deploy
kubectl get pods
# Expose the deployment outside the cluster via a NodePort service.
kubectl expose deployment web --port=80 --type=NodePort --target-port=80 --name=web1 -o yaml > web1.yaml
kubectl apply -f web1.yaml
# Find the externally exposed NodePort.
kubectl get pods,svc
# Browse to <any-node-IP>:<NodePort> to reach nginx.