K8S (1): Kubeadm Deployment Scripts for Kubernetes v1.15.1

Preparation

Check the CentOS release and hostname

# Run the following on both master and worker nodes
# The hostname set here becomes the node's name in the Kubernetes cluster
# Do not use localhost as a node name
# Set the hostnames (this can also be done manually on each node ahead of time)
# On 192.168.1.1
hostnamectl set-hostname master
# On 192.168.1.2
hostnamectl set-hostname node01
# On 192.168.1.3
hostnamectl set-hostname node02

# Check the OS release
[root@master opt]# cat /etc/redhat-release
CentOS Linux release 7.6.1810 (Core)

# Verify CPU information
[root@master opt]# lscpu
# Architecture: x86_64    this guide does not support the ARM architecture
# CPU(s):       2         at least 2 CPU cores are required

Configure local name resolution in /etc/hosts on all nodes

[root@master opt]# cat ${PWD}/host.list
192.168.1.1 master
192.168.1.2 node01
192.168.1.3 node02 
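
The orchestration scripts later append this file verbatim to /etc/hosts on every node; a hedged idempotent variant (my sketch, not part of the original scripts) avoids duplicate entries on re-runs:

while read -r line; do
    grep -qxF "$line" /etc/hosts || echo "$line" >> /etc/hosts
done < /root/host.list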

Raise the file-handle limits on each node (this script can also be run manually on each node ahead of time)

[root@master opt]# cat ${PWD}/limit.sh
#!/bin/bash
# Drop any existing 65535 entries, then append fresh nofile/nproc limits
sed -i '/65535/d' /etc/security/limits.conf
echo "* hard nofile 65535
* soft nofile 65535
* hard nproc 65535
* soft nproc 65535" >> /etc/security/limits.conf
# Raise the default nproc limit in 20-nproc.conf from 4096 to 65535
sed -i "s/4096/65535/" /etc/security/limits.d/20-nproc.conf

Deployment scripts

Create a node-inventory list for the cluster. Each line holds, in order: hostname, IP, user, and password. The master must be on the first line; otherwise create_info.list cannot be generated and pushed to the worker nodes.

[root@master opt]# cat ${PWD}/ip.list   
master  192.168.1.1   root  Helloword123
node01  192.168.1.2   root  Helloword123
node02  192.168.1.3   root  Helloword123
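
A pre-flight sketch (hypothetical helper, not in the original script set) that enforces the master-first rule and pings each host before running the installer:

#!/bin/bash
# check_ip_list.sh (hypothetical): validate ip.list before install-kubernetes.sh
first=$(awk 'NR==1{print $1}' ip.list)
[ "$first" = "master" ] || { echo "master must be on the first line of ip.list"; exit 1; }
while read -r name ip user pass; do
    ping -c 1 -W 2 "$ip" >/dev/null || echo "WARN: $name ($ip) is unreachable"
done < ip.list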

Deployment
The integrated install script below drives the whole process by invoking each of the configured scripts:

[root@master opt]# cat install-kubernetes.sh
#!/bin/bash
# Kubernetes v1.15.1
# Docker-ce 18.09.7
# CentOS 7.6
# (cpu: 2 cores, mem: 4G) x 3 machines
# NOTE: run this script on the chosen master node
if [ ! -f ~/.ssh/id_rsa ]; then
    ssh-keygen -P "" -f ~/.ssh/id_rsa
fi
master_ip=$(ip addr |grep inet |grep -v inet6 |grep eth0 |awk '{print $2}' |awk -F "/" '{print $1}')
nums=$(cat ${PWD}/ip.list |wc -l)
for num in $(seq ${nums})
do
    node_info=$(cat ${PWD}/ip.list |awk "NR==${num}")
    # Node hostname
    name=$(echo ${node_info} |awk '{print $1}')
    # Node IP
    ip=$(echo ${node_info} |awk '{print $2}')
    # Node login account
    server_account=$(echo ${node_info} |awk '{print $3}')
    # Node password
    passwd=$(echo ${node_info} |awk '{print $4}')
    if [ -z "${name}" -o -z "${ip}" -o -z "${server_account}" -o -z "${passwd}" ]; then
        echo -e "\033[33m--------------------- ${node_info} does not match the hostname+ip+user+password format -----------------------\033[0m"
        exit
    fi
    # Push the public key to the node
    ${PWD}/ssh_exp.sh ${ip} ${server_account} ${passwd}
    # Set the node's hostname
    ssh -t -t -p 22 ${server_account}@${ip} "hostnamectl set-hostname ${name}"
    # Distribute the scripts that both master and worker nodes need
    # hosts list for local name resolution
    scp ${PWD}/host.list ${ip}:/root/host.list
    # file-handle limit script
    scp ${PWD}/limit.sh ${ip}:/root/limit.sh
    # Docker install/config script
    scp ${PWD}/k8s_docker.sh ${ip}:/root/k8s_docker.sh
    # nfs-utils install script
    scp ${PWD}/k8s_nfs.sh ${ip}:/root/k8s_nfs.sh
    # kubelet/kubeadm/kubectl install script
    scp ${PWD}/k8s_sysbase.sh ${ip}:/root/k8s_sysbase.sh
    if [[ ${ip} = ${master_ip} ]]; then
        # Initialize the master node
        echo -e "\033[34m--------------------begin install master node----------------------\033[0m"
        scp ${PWD}/init_master.sh ${ip}:/root/init_master.sh
        scp ${PWD}/master.sh ${ip}:/root/master.sh
        ssh -t -t -p 22 ${server_account}@${ip} "sh /root/master.sh"
    else
        # Initialize a worker node
        echo -e "\033[34m--------------------begin install work node----------------------\033[0m"
        scp ${PWD}/work.sh ${ip}:/root/work.sh
        scp ${PWD}/init_work.sh ${ip}:/root/init_work.sh
        ssh -t -t -p 22 ${server_account}@${ip} "sh /root/work.sh"
    fi
done
ssh -t -t -p 22 root@${master_ip} "kubectl get nodes"
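
Run it from the directory that holds all the scripts and list files, for example:

cd /opt && sh install-kubernetes.sh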

Public-key distribution script (expect), run from the master

[root@master opt]# cat ${PWD}/ssh_exp.sh
#!/usr/bin/expect -f
# Push the local SSH public key to a node: ssh_exp.sh <ip> <account> <password>
set port 22
set node_ip [lindex $argv 0]
set server_account [lindex $argv 1]
set password [lindex $argv 2]

spawn ssh-copy-id -p $port ${server_account}@${node_ip}
expect {
    "yes/no" { send "yes\r"; exp_continue }
    "password:" { send "${password}\r" }
}
expect eof
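
The installer calls this as ssh_exp.sh <ip> <account> <password>; a manual test against a single node (values taken from ip.list above):

./ssh_exp.sh 192.168.1.2 root Helloword123
ssh root@192.168.1.2 hostname    # should now log in without a password prompt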

Docker installation script

[root@master opt]# cat ${PWD}/k8s_docker.sh
#!/bin/bash
# Install Docker
# Run on both master and worker nodes
# Remove old versions first
#yum remove -y docker docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-selinux docker-engine-selinux docker-engine
if rpm -qa |grep -q docker; then
    yum -y remove docker-ce-18.09.7-3.el7.x86_64
    yum -y remove docker-ce-cli-18.09.7-3.el7.x86_64
fi
# Configure the yum repository
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

# Install Docker
yum install -y docker-ce-18.09.7 docker-ce-cli-18.09.7 containerd.io

# The official docs recommend switching the Docker cgroup driver to systemd. In my tests Docker
# failed to start after the change but ran fine without it, so decide based on your environment.
#sed -ir '/^ExecStart=/cExecStart=/usr/bin/dockerd --exec-opt native.cgroupdriver=systemd/' /usr/lib/systemd/system/docker.service

# Start Docker
systemctl enable docker
systemctl start docker
docker --version
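
Given the cgroup-driver caveat above, it is worth confirming which driver Docker actually uses after installation (an assumed check):

docker info 2>/dev/null | grep -i 'cgroup driver'    # 'cgroupfs' unless the systemd line was applied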

Scripts to install nfs-utils and kubelet/kubeadm/kubectl

[root@master opt]# cat ${PWD}/k8s_nfs.sh
#!/bin/bash
# Run on both master and worker nodes
# Install nfs-utils; it must be installed before NFS network storage can be mounted
yum install -y nfs-utils

[root@master opt]# cat ${PWD}/k8s_sysbase.sh
#!/bin/bash
# Run on both master and worker nodes

# Configure the Kubernetes yum repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Disable the firewall, SELinux, and swap
systemctl stop firewalld
systemctl disable firewalld
setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
swapoff -a
yes | cp /etc/fstab /etc/fstab_bak
grep -v swap /etc/fstab_bak > /etc/fstab

# Update /etc/sysctl.conf and reload
cat <<EOF >> /etc/sysctl.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl -p

# Install kubelet, kubeadm, kubectl
yum install -y kubelet-1.15.1 kubeadm-1.15.1 kubectl-1.15.1

# Use a domestic Docker registry mirror to speed up and stabilize image pulls
curl -sSL https://get.daocloud.io/daotools/set_mirror.sh | sh -s http://f1361db2.m.daocloud.io
#sed -ir '/^ExecStart=/cExecStart=/usr/bin/dockerd --exec-opt native.cgroupdriver=systemd/' /usr/lib/systemd/system/docker.service
systemctl daemon-reload
systemctl restart docker
systemctl enable kubelet && systemctl start kubelet
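
A quick sanity check after the script runs (assumed verification commands):

sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward   # both should print 1
free -m | grep -i swap                                          # the Swap line should be all zeros
systemctl is-enabled kubelet                                    # should print 'enabled'

If sysctl reports 'No such file or directory' for the bridge keys, the br_netfilter module is not loaded yet (modprobe br_netfilter).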

Master-node initialization script

# In this deployment the master also hosts shared services and is not used as a worker node
[root@master opt]# cat ${PWD}/init_master.sh
#!/bin/bash
# Run only on the master node
# Replace 192.168.1.1 with the master's actual IP address
# Reset first in case a previous master init failed
kubeadm reset -f
systemctl daemon-reload
systemctl restart kubelet
cat <<EOF > /root/kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.1.1
  bindPort: 6443
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: v1.15.1
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers   # use the Aliyun image source
controlPlaneEndpoint: "192.168.1.1:6443"   # if unset, advertiseAddress+bindPort is used as the cluster API address by default, and multi-master setups are not supported
networking:
  podSubnet: "10.10.0.0/20"
EOF
kubeadm init --config=/root/kubeadm-config.yaml --upload-certs
# Initialize kubectl config for the root user
rm -rf /root/.kube/
mkdir /root/.kube/
cp -i /etc/kubernetes/admin.conf /root/.kube/config   # also needed later if kubectl is installed on a non-master node
# At this point the coredns pods in the kube-system namespace are still Pending because no network plugin is installed yet
# calico
# flannel can be used instead of calico to build the cluster network. calico routes at layer 3 via iptables rules,
# with no packet encapsulation/decapsulation, so bandwidth and latency are close to the host's, making it considerably
# more efficient than flannel, which implements the network at layer 2; calico can also block cross-namespace access.
kubectl apply -f https://docs.projectcalico.org/v3.6/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
sleep 120  # wait 2 minutes so the etcd/apiserver/controller-manager/kube-proxy/scheduler pods created during init reach Running (their manifests live under /etc/kubernetes/manifests/)
kubectl get pod -n kube-system && kubectl get nodes

# Generate the join command
kubeadm token create --print-join-command >/opt/create_info.list
# Save the token and CA cert hash from the master init in create_info.list and scp it to the worker nodes; it also comes in handy when adding workers later
scp /opt/create_info.list node01:/root
scp /opt/create_info.list node02:/root
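
After a successful init, create_info.list contains the full join command; schematically (placeholder token and hash, the real values come from your cluster):

kubeadm join 192.168.1.1:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>

This is exactly what init_work.sh parses below: awk field 5 is the token and the last field is the cert hash.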

Worker-node initialization script

[root@master opt]# cat ${PWD}/init_work.sh
#!/bin/bash
# Initialize a worker
# Run only on worker nodes (in a test environment the master works too)
#read -p "Enter the cluster token: "  token
#read -p "Enter the cluster cert hash: "  crt
kubeadm reset -f
systemctl daemon-reload
systemctl restart kubelet
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
# create_info.list holds the full join command; field 5 is the token, the last field is the CA cert hash
token=$(cat /root/create_info.list |awk '{print $5}')
crt=$(cat /root/create_info.list |awk '{print $NF}')
#echo "192.168.1.1  apiserver.demo" >> /etc/hosts
kubeadm join 192.168.1.1:6443 --token ${token} --discovery-token-ca-cert-hash ${crt}
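
Once a worker has joined, it appears on the master within a minute or so:

kubectl get nodes    # run on the master; a new node stays NotReady until its calico pod is running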

Master orchestration script

[root@master opt]# cat ${PWD}/master.sh
#!/bin/bash
# Runs all setup steps on the master node, in order
cat /root/host.list >> /etc/hosts
sh /root/limit.sh
sh /root/k8s_docker.sh
sh /root/k8s_nfs.sh
sh /root/k8s_sysbase.sh
sh /root/init_master.sh

Worker orchestration script

[root@master opt]# cat ${PWD}/work.sh
#!/bin/bash
# Runs all setup steps on a worker node, in order
cat /root/host.list >> /etc/hosts
sh /root/limit.sh
sh /root/k8s_docker.sh
sh /root/k8s_nfs.sh
sh /root/k8s_sysbase.sh
sh /root/init_work.sh

After installation completes, check the status of all cluster nodes from the master:

[root@master opt]# kubectl get nodes
NAME     STATUS   ROLES    AGE   VERSION
master   Ready    master   1h    v1.15.1
node01   Ready    <none>   1h    v1.15.1
node02   Ready    <none>   1h    v1.15.1
Some optional customizations
Running kube-proxy in ipvs mode

1. Before initializing the nodes, load the ipvs kernel modules on every k8s node

[root@master01 install_k8s]# yum install ipset ipvsadm
[root@master01 install_k8s]# cat ${PWD}/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
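
The commands below load the file from /etc/sysconfig/modules/, so copy it into place first (path assumed from those commands):

cp ${PWD}/ipvs.modules /etc/sysconfig/modules/ipvs.modules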

[root@master01 install_k8s]# chmod 755 /etc/sysconfig/modules/ipvs.modules
[root@master01 install_k8s]# sh /etc/sysconfig/modules/ipvs.modules
[root@master01 install_k8s]# lsmod | grep ip_vs

2. When running kubeadm init on the master, extend the init configuration file so that kube-proxy starts in ipvs mode

[root@master01 opt]# kubeadm config print init-defaults > init.yaml

[root@master01 opt]# cat install_k8s/init.yaml 
...
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"

[root@master01 opt]# systemctl enable kubelet

[root@master01 opt]# kubeadm init --config init.yaml
...
[root@master01 opt]# ipvsadm -L
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  master-01:https rr
  -> master-01:sun-sr-https       Masq    1      4          0         
TCP  master-01:domain rr
  -> 192.168.184.66:domain        Masq    1      0          0         
  -> 192.168.184.67:domain        Masq    1      0          0         
TCP  master-01:9153 rr
  -> 192.168.184.66:9153          Masq    1      0          0         
  -> 192.168.184.67:9153          Masq    1      0          0         
TCP  master-01:ircu-2 rr
UDP  master-01:domain rr
  -> 192.168.184.66:domain        Masq    1      0          0         
  -> 192.168.184.67:domain        Masq    1      0          0         
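
If the cluster was already initialized with the default iptables mode, a commonly used alternative (a hedged sketch, not from the original walkthrough) is to edit the kube-proxy ConfigMap and recreate the kube-proxy pods:

kubectl -n kube-system edit configmap kube-proxy            # set mode: "ipvs" under config.conf
kubectl -n kube-system delete pod -l k8s-app=kube-proxy     # the DaemonSet recreates them with the new mode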

3. In ipvs mode, inspect the virtual servers created for the cluster component services

[root@master-01 opt]# kubectl get service -o wide -n kube-system
NAME          TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE   SELECTOR
calico-etcd   ClusterIP   10.96.232.136   <none>        6666/TCP                 41m   k8s-app=calico-etcd
kube-dns      ClusterIP   10.96.0.10      <none>        53/UDP,53/TCP,9153/TCP   48m   k8s-app=kube-dns
[root@master-01 opt]# ip a |grep kube-ipvs0
5: kube-ipvs0: <BROADCAST,NOARP> mtu 1500 qdisc noop state DOWN group default 
    inet 10.96.0.1/32 brd 10.96.0.1 scope global kube-ipvs0
    inet 10.96.0.10/32 brd 10.96.0.10 scope global kube-ipvs0
    inet 10.96.232.136/32 brd 10.96.232.136 scope global kube-ipvs0
Docker tuning configuration
cat /etc/docker/daemon.json
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "oom-score-adjust": -1000,
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m",
    "max-file": "3"
  },
  "max-concurrent-downloads": 10,
  "max-concurrent-uploads": 10,
  "registry-mirrors": ["https://7bezldxe.mirror.aliyuncs.com"],
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}

Notes (JSON does not allow inline comments, so the annotations are collected here):
- oom-score-adjust -1000 keeps the OOM killer from picking the docker daemon
- log-opts rotates the json-file logs at 100m, keeping at most three files
- max-concurrent-downloads / max-concurrent-uploads cap simultaneous layer transfers per pull/push
- registry-mirrors is the image mirror address used to speed up pulls
- storage-driver switches the storage driver to overlay2
- an optional "bip" key (e.g. "169.254.123.1/24") can pin the docker0 bridge subnet
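
Changes to daemon.json take effect only after restarting Docker (assumed follow-up commands):

systemctl daemon-reload && systemctl restart docker
docker info 2>/dev/null | grep -iE 'cgroup driver|storage driver'    # confirm systemd and overlay2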
Resetting the cluster with kubeadm reset
[root@master-01 install_k8s]# kubeadm reset 
[reset] Reading configuration from the cluster...
[reset] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[reset] WARNING: Changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted.
[reset] Are you sure you want to proceed? [y/N]: y
[preflight] Running pre-flight checks
[reset] Removing info for node "master-01" from the ConfigMap "kubeadm-config" in the "kube-system" Namespace
W0318 16:18:38.220421   24890 removeetcdmember.go:61] [reset] failed to remove etcd member: error syncing endpoints with etc: etcdclient: no available endpoints
.Please manually remove this etcd member using etcdctl
[reset] Stopping the kubelet service
[reset] Unmounting mounted directories in "/var/lib/kubelet"
[reset] Deleting contents of config directories: [/etc/kubernetes/manifests /etc/kubernetes/pki]
[reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]
[reset] Deleting contents of stateful directories: [/var/lib/etcd /var/lib/kubelet /etc/cni/net.d /var/lib/dockershim /var/run/kubernetes]

The reset process does not reset or clean up iptables rules or IPVS tables.
If you wish to reset iptables, you must do so manually.
For example:
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X

If your cluster was setup to utilize IPVS, run ipvsadm --clear (or similar)
to reset your system's IPVS tables.

The reset process does not clean your kubeconfig files and you must remove them manually.
Please, check the contents of the $HOME/.kube/config file.

Following the hints at the end of the reset output:

1. If kube-proxy uses iptables

[root@master01 install_k8s]# iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
[root@master01 install_k8s]# iptables -nL
Chain INPUT (policy ACCEPT)
target     prot opt source               destination         

Chain FORWARD (policy DROP)
target     prot opt source               destination         

Chain OUTPUT (policy ACCEPT)
target     prot opt source               destination         

2. If kube-proxy uses ipvs

[root@master01 install_k8s]# ipvsadm -C
[root@master-01 install_k8s]# ipvsadm -L
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn

3. Delete /etc/cni/net.d

[root@master01 install_k8s]# rm -rf /etc/cni/net.d

Letting the master double as a worker node

kubectl taint nodes --all node-role.kubernetes.io/master-
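
To confirm the taint was removed (an assumed check):

kubectl describe node master | grep -i taints    # should show <none>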
Kubernetes minor-version upgrades

For the detailed procedure, see the referenced article; I tried it and it works.
