快速部署 Kubeadm 1.13 集群ETCD

转自同事脚本 ,部分根据集群微调

 

软件环境清单

kubeadm.x86_64  Version :1.13.1-0

kubelet.x86_64 Version : 1.13.1-0

kubectl.x86_64 Version : 1.13.1-0

kubernetes-cni.x86_64  Version : 0.6.0-0

docker  Version 18.06.1-ce

 

一、更改系统部署参数

# Install wget (used below to fetch the Aliyun repo files)
yum install wget -y

# Stop and disable the firewalld firewall (kubeadm preflight needs the ports open)
systemctl stop firewalld && systemctl disable firewalld


# Disable swap now, and comment out the swap entry so it stays off after reboot
# (kubelet refuses to start while swap is enabled)
swapoff -a  &&  sed -i 's/.*swap.*/#&/' /etc/fstab

# Disable SELinux immediately and for subsequent boots
setenforce  0
sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/sysconfig/selinux
sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/sysconfig/selinux
sed -i "s/^SELINUX=permissive/SELINUX=disabled/g" /etc/selinux/config

# Kernel tuning for ipvs and CNI bridged traffic
cat <<EOF >  /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
EOF
# BUG FIX: the settings above were written to /etc/sysctl.d/k8s.conf, but the
# original ran `sysctl -p /etc/sysctl.conf`, which never loads that file.
# `sysctl --system` applies /etc/sysctl.d/*.conf (including k8s.conf).
sysctl --system

# Add every master node to /etc/hosts. In a multi-master cluster ALL masters
# must be listed, otherwise hosts cannot be resolved during join.
sed -i '$a\192.168.100.41 k8s-host1' /etc/hosts
sed -i '$a\192.168.100.42 k8s-host2' /etc/hosts
sed -i '$a\192.168.100.43 k8s-host3' /etc/hosts

# Install the ipvs userspace tools and tracing/conntrack dependencies
yum install ipvsadm ipset sysstat conntrack libseccomp -y

# Install keepalived (used for the VIP in a fully HA setup)
yum install keepalived -y

# Register the ipvs kernel modules so systemd-modules-load loads them at boot.
# Only modules that exist for the running kernel are written to the list
# (modinfo prints an ERROR line for unknown modules).
:> /etc/modules-load.d/ipvs.conf
module=(
  ip_vs
  ip_vs_lc
  ip_vs_wlc
  ip_vs_rr
  ip_vs_wrr
  ip_vs_lblc
  ip_vs_lblcr
  ip_vs_dh
  ip_vs_sh
  ip_vs_fo
  ip_vs_nq
  ip_vs_sed
  ip_vs_ftp
  )
for kernel_module in "${module[@]}"; do
  # quote expansions; use an explicit if instead of the fragile `&& … || :` chain
  if /sbin/modinfo -F filename "$kernel_module" |& grep -qv ERROR; then
    echo "$kernel_module" >> /etc/modules-load.d/ipvs.conf
  fi
done

# Enable the loader service and start it now (loads the modules immediately
# and on every subsequent boot)
systemctl enable --now systemd-modules-load.service
# Back up the stock CentOS repo files and switch to the Aliyun mirrors,
# then add the Aliyun Kubernetes repo.
mkdir -p /etc/yum.repos.d/bak
mv /etc/yum.repos.d/CentOS* /etc/yum.repos.d/bak
wget  -P /etc/yum.repos.d/ http://mirrors.aliyun.com/repo/Centos-7.repo
wget  -P /etc/yum.repos.d/ http://mirrors.aliyun.com/repo/epel-7.repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Raise the default open-file / process / memlock limits
# (single heredoc instead of six echo appends; same lines appended)
cat <<EOF >> /etc/security/limits.conf
* soft nofile 65536
* hard nofile 65536
* soft nproc 65536
* hard nproc 65536
* soft  memlock  unlimited
* hard memlock  unlimited
EOF

# Install kubelet/kubeadm/kubectl 1.13 and the matching CNI plugin package
yum install -y kubelet-1.13* kubernetes-cni-0.6.0-0.x86_64 kubeadm-1.13* kubectl-1.13* --disableexcludes=kubernetes

# Install chrony for time sync — all nodes must agree on time
# (critical later: TLS certificates and etcd are time-sensitive)
yum install chrony -y

# Install the docker version recommended by upstream k8s for 1.13.
#curl https://releases.rancher.com/install-docker/18.06.sh | sh
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install docker-ce-18.06* -y


# docker bash completion
yum install -y epel-release bash-completion && cp /usr/share/bash-completion/completions/docker /etc/bash_completion.d/
systemctl enable --now docker
systemctl enable chronyd.service
systemctl start chronyd.service
# Point the pause image at the Aliyun mirror (skip if you can reach k8s.gcr.io directly).
# NOTE(review): inserting at a fixed line number ('9a') is fragile — confirm line 9
# is still the right insertion point in 10-kubeadm.conf for your kubeadm package.
sed -i '9a\Environment="KUBELET_EXTRA_ARGS=--pod-infra-container-image=registry.aliyuncs.com/google_containers/pause-amd64:3.1"' /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
sed -i 's/ExecStart=\/usr\/bin\/kubelet/ExecStart=\/usr\/bin\/kubelet \$KUBELET_EXTRA_ARGS /g' /etc/systemd/system/kubelet.service.d/10-kubeadm.conf

# Enable kubelet and restart both services to pick up the new unit config.
# (The duplicate `systemctl enable --now docker` from the original was removed —
# docker is already enabled and started above.)
systemctl enable --now kubelet
systemctl restart docker
systemctl restart kubelet

# Reboot so the swap/selinux/limits changes are fully applied
reboot

 

二、配置 master config文件

init 地址:https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/

1.13主要是优化了kubeadm这一块,把配置分成了很多个接口,目前只有InitConfiguration和ClusterConfiguration是beta,其余都是v1alpha1,能不用就别用,官方也已注明

# 1.12版本 通过kubeadm config print-defaults 会将默认的所有配置打印到终端

# 1.13版本 kubeadm config print init-defaults 也会打印,但没有上面那个全,部分被隐藏了

 

先安装nginx,即创建外部kube_server_proxy

# Run nginx in host network mode as a TCP load balancer ("kube_server_proxy")
# in front of the apiservers, using the stream config below mounted read from
# /etc/nginx/nginx.conf on the host.
docker run --restart=always -v /etc/nginx/nginx.conf:/etc/nginx/nginx.conf --name kube_server_proxy --net host -it -d nginx

 

nginx.conf

worker_processes auto;
user root;
events {
    worker_connections  20240;
    use epoll;
}
error_log /var/log/nginx_error.log info;

# TCP (stream) load balancer in front of the three kube-apiserver instances.
stream {
    upstream kube-servers {
        hash  consistent;
        server k8s-host1:6443 weight=5 max_fails=1 fail_timeout=10s;
        server k8s-host2:6443 weight=5 max_fails=1 fail_timeout=10s;
        server k8s-host3:6443 weight=5 max_fails=1 fail_timeout=10s;
    }

    server {
        # BUG FIX: controlPlaneEndpoint ("k8svip.com:7443") and every
        # `kubeadm join …:7443` in this guide use port 7443, but the original
        # config listened on 8443 — the ports must match or joins will fail.
        listen 7443;
        proxy_connect_timeout 30s;
        proxy_timeout 60s;
        proxy_pass kube-servers;
    }
}

 

#如果你配置了VIP地址,请将下面配置中的 192.168.100.41 改为VIP地址

vim kubeadm-config-init.yaml

# kubeadm InitConfiguration: defines the bootstrap token nodes present when joining.
apiVersion: kubeadm.k8s.io/v1beta1
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  # ttl 0s = token never expires; convenient for a lab, rotate it in production
  ttl: 0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
 
---
 
# ClusterConfiguration: master address pool + the VIP address.
# certSANs must include every master IP/hostname and the VIP so the apiserver
# certificate is valid no matter which endpoint a client connects through.
apiServer:
  certSANs:
  - 192.168.100.45
  - 192.168.100.41
  - 192.168.100.42
  - 192.168.100.43
  - k8s-host1
  - k8s-host2
  - k8s-host3
  - k8svip.com
  extraArgs:
    authorization-mode: Node,RBAC
    advertise-address: 0.0.0.0
# All kubectl/kubelet traffic goes through the nginx proxy on port 7443
controlPlaneEndpoint: "k8svip.com:7443"
controllerManager:
  extraArgs:
    address: 0.0.0.0
scheduler:
  extraArgs:
    address: 0.0.0.0
apiVersion: kubeadm.k8s.io/v1beta1
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
dns:
  type: CoreDNS
# Pull all control-plane images from the Aliyun mirror instead of k8s.gcr.io
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.13.1
networking:
  dnsDomain: cluster.local
  # podSubnet must match the CNI plugin's expected CIDR (flannel: 10.244.0.0/16)
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
---
# kube-proxy in ipvs mode (requires the ipvs kernel modules loaded earlier)
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"

如果是想独立部署ETCD,在kubeadm-config-init.yaml增加以下

来自于:https://kubernetes.io/docs/setup/independent/high-availability/?tdsourcetag=s_pcqq_aiomsg

# Variant for an EXTERNAL etcd cluster: replace the placeholders before use.
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
kubernetesVersion: stable
apiServer:
  certSANs:
  - "LOAD_BALANCER_DNS"
controlPlaneEndpoint: "LOAD_BALANCER_DNS:LOAD_BALANCER_PORT"
etcd:
    external:
        # one https endpoint per etcd member
        endpoints:
        - https://ETCD_0_IP:2379
        - https://ETCD_1_IP:2379
        - https://ETCD_2_IP:2379
        # client certs the apiserver uses to talk to etcd; must exist on every master
        caFile: /etc/kubernetes/pki/etcd/ca.crt
        certFile: /etc/kubernetes/pki/apiserver-etcd-client.crt
        keyFile: /etc/kubernetes/pki/apiserver-etcd-client.key

 

三、部署服务

kubeadm init --config kubeadm-config-init.yaml

等待即可,所有镜像均来自阿里云镜像仓库

如提示,请执行

  # Copy the admin kubeconfig into the current user's home so kubectl works
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

最后一步,安装网络组件,这里我使用的是flannel,以前的版本镜像因为要FQ无法下载,现在官方已作了修改,所以可以直接使用

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/bc79dd1505b0c8681ece4de4c0d86c5cd2643275/Documentation/kube-flannel.yml
# 更多网络组件可以在这里查看
https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/

如果机器多且网络资源较多的可以考虑安装calico

kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml

wget https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml

vim calico.yaml

将192.168.0.0/16改为10.244.0.0/16

最后,耐心等待一会

上面已部署完单机master节点

现在我们想让k8s实现高可用,并且其中etcd,controller-manager,scheduler各自通过选举进行调度,我们可以这样:

 

kubeadm join k8svip.com:7443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:3093001b7ef70903a5c22954b3b3c72a1f9d33b6c28d41e92a580cf60dd5ac2d

这条命令如果有使用的应当清楚是让新的机器加入到k8s当中并充当worker nodes,只需要在后面跟上--experimental-control-plane 就能让该节点部署master上的所有组件包括etcd,如果在第一次配置中指定etcd配置(外部部署etcd),那么会直接跳过

如果你忘记了上述命令,可以通过 kubeadm token create --print-join-command 来获取,所以

1. 新机器加入到master充当worker nodes

kubeadm join k8svip.com:7443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:3093001b7ef70903a5c22954b3b3c72a1f9d33b6c28d41e92a580cf60dd5ac2d

2. 新机器加入到master充当 master nodes

# 192.168.100.42 和 192.168.100.43 是另外两个master节点

# Create the local directories (the same paths are also needed on each new master)
mkdir -p /etc/kubernetes/pki/etcd/ && mkdir -p ~/.kube


# Passwordless SSH key auth to the other masters should be set up beforehand.
# Distribute the shared CA / service-account / front-proxy material and the
# admin kubeconfig to the other two master nodes (192.168.100.42/43) in one
# loop instead of repeating each scp per host.
for host in 192.168.100.42 192.168.100.43; do
  # BUG FIX: make sure the target directories exist on the remote node first —
  # the original scp'd into /etc/kubernetes/pki/etcd/ and ~/.kube/ without
  # ever creating them on the remote side.
  ssh root@"$host" 'mkdir -p /etc/kubernetes/pki/etcd ~/.kube'
  scp /etc/kubernetes/pki/{ca.crt,ca.key,sa.key,sa.pub,front-proxy-ca.crt,front-proxy-ca.key} root@"$host":/etc/kubernetes/pki/
  scp /etc/kubernetes/pki/etcd/{ca.crt,ca.key} root@"$host":/etc/kubernetes/pki/etcd/
  scp /etc/kubernetes/admin.conf root@"$host":/etc/kubernetes/
  scp /etc/kubernetes/admin.conf root@"$host":~/.kube/config
done


# Join this node as an additional control-plane member (deploys apiserver,
# controller-manager, scheduler and a local etcd member unless external etcd
# was configured at init time)
kubeadm join k8svip.com:7443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:3093001b7ef70903a5c22954b3b3c72a1f9d33b6c28d41e92a580cf60dd5ac2d --experimental-control-plane

## PS:

192.168.100.41 是因为我没有去安装VIP,所以直接以我主机的IP来显示了,如果你要搭建完整的高可用,请务必将该地址替换成VIP地址,包括上面的所有配置文件

  • 2
    点赞
  • 0
    收藏
    觉得还不错? 一键收藏
  • 1
    评论
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值