Building a Highly Available Kubernetes Cluster with kubeadm

Common commands

# kubectl apply -f calico.yaml

# watch kubectl get pods --all-namespaces

kubectl describe pod calico-kube-controllers-6b8f6f78dc-m9gkf -n kube-system

# Make the master schedulable (remove the taint)
# kubectl taint node node01 node-role.kubernetes.io/master-

# Make the master unschedulable
# kubectl taint node node01 node-role.kubernetes.io/master="":NoSchedule

# kubectl create deployment web1 --image=nginx --dry-run=client -o yaml > deployment.yaml
# kubectl expose deployment web1 --port=80 --target-port=80 --type=NodePort --dry-run=client -o yaml > service.yaml

hostnamectl set-hostname master1

rm -rf  /var/lib/calico 
rm -rf /etc/cni/net.d/ 

# List current IPVS rules
ipvsadm -Ln

# Clear all IPVS rules
ipvsadm -C

# After a reset, completely clean up the previous initialization
kubeadm reset
rm -fr ~/.kube/
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
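These cleanup steps can be combined into a single script for any node that needs to be re-initialized. A minimal sketch, assuming kubeadm and ipvsadm are installed and using the Calico paths listed above:

#!/bin/bash
# reset-node.sh - combine the cleanup steps above for one node
kubeadm reset -f                            # tear down everything kubeadm created
rm -rf ~/.kube/                             # drop the stale kubeconfig
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
ipvsadm -C                                  # clear leftover IPVS rules
rm -rf /var/lib/calico /etc/cni/net.d/      # remove Calico/CNI state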

Step 1: System initialization

Create init-system.sh

#!/bin/bash

# 1. install common tools; these packages are optional
source /etc/profile
yum -y install chrony bridge-utils ipvsadm ipset sysstat conntrack libseccomp wget tcpdump screen vim nfs-utils bind-utils socat telnet sshpass net-tools lrzsz yum-utils device-mapper-persistent-data lvm2 tree nc lsof strace nmon iptraf iftop rpcbind mlocate

# 2. disable IPv6
if [ $(cat /etc/default/grub |grep 'ipv6.disable=1' |grep GRUB_CMDLINE_LINUX|wc -l) -eq 0 ];then
    sed -i 's/GRUB_CMDLINE_LINUX="/GRUB_CMDLINE_LINUX="ipv6.disable=1 /' /etc/default/grub
    /usr/sbin/grub2-mkconfig -o /boot/grub2/grub.cfg
fi

# 3. disable NetworkManager
systemctl stop NetworkManager
systemctl disable NetworkManager

# 4. enable chronyd for time synchronization
systemctl enable chronyd.service
systemctl start chronyd.service
# 5. enable bridge-nf-call-iptables. Note: you may need to run '/usr/sbin/modprobe br_netfilter' again after a reboot.
cat > /etc/rc.sysinit << 'EOF'
#!/bin/bash
for file in /etc/sysconfig/modules/*.modules ; do
[ -x $file ] && $file
done
EOF

cat > /etc/sysconfig/modules/br_netfilter.modules << EOF
modprobe br_netfilter
EOF

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
chmod 755 /etc/sysconfig/modules/br_netfilter.modules

# 6. add route forwarding and bridge parameters to /etc/sysctl.conf
[ $(cat /etc/sysctl.conf | grep "net.ipv4.ip_forward=1" |wc -l) -eq 0 ] && echo "net.ipv4.ip_forward=1" >>/etc/sysctl.conf
[ $(cat /etc/sysctl.conf | grep "net.bridge.bridge-nf-call-iptables=1" |wc -l) -eq 0 ] && echo "net.bridge.bridge-nf-call-iptables=1" >>/etc/sysctl.conf
[ $(cat /etc/sysctl.conf | grep "net.bridge.bridge-nf-call-ip6tables=1" |wc -l) -eq 0 ] && echo "net.bridge.bridge-nf-call-ip6tables=1" >>/etc/sysctl.conf
[ $(cat /etc/sysctl.conf | grep "fs.may_detach_mounts=1" |wc -l) -eq 0 ] && echo "fs.may_detach_mounts=1" >>/etc/sysctl.conf
[ $(cat /etc/sysctl.conf | grep "vm.overcommit_memory=1" |wc -l) -eq 0 ] && echo "vm.overcommit_memory=1" >>/etc/sysctl.conf
[ $(cat /etc/sysctl.conf | grep "vm.panic_on_oom=0" |wc -l) -eq 0 ] && echo "vm.panic_on_oom=0" >>/etc/sysctl.conf
[ $(cat /etc/sysctl.conf | grep "vm.swappiness=0" |wc -l) -eq 0 ] && echo "vm.swappiness=0" >>/etc/sysctl.conf
[ $(cat /etc/sysctl.conf | grep "fs.inotify.max_user_watches=89100" |wc -l) -eq 0 ] && echo "fs.inotify.max_user_watches=89100" >>/etc/sysctl.conf
[ $(cat /etc/sysctl.conf | grep "fs.file-max=52706963" |wc -l) -eq 0 ] && echo "fs.file-max=52706963" >>/etc/sysctl.conf
[ $(cat /etc/sysctl.conf | grep "fs.nr_open=52706963" |wc -l) -eq 0 ] && echo "fs.nr_open=52706963" >>/etc/sysctl.conf
[ $(cat /etc/sysctl.conf | grep "net.netfilter.nf_conntrack_max=2310720" |wc -l) -eq 0 ] && echo "net.netfilter.nf_conntrack_max=2310720" >>/etc/sysctl.conf
/usr/sbin/sysctl -p


# 7. raise resource limits in /etc/security/limits.conf
[ $(cat /etc/security/limits.conf|grep '* soft nproc 10240000'|wc -l) -eq 0 ]&&echo '* soft nproc 10240000' >>/etc/security/limits.conf
[ $(cat /etc/security/limits.conf|grep '* hard nproc 10240000'|wc -l) -eq 0 ]&&echo '* hard nproc 10240000' >>/etc/security/limits.conf
[ $(cat /etc/security/limits.conf|grep '* soft nofile 10240000'|wc -l) -eq 0 ]&&echo '* soft nofile 10240000' >>/etc/security/limits.conf
[ $(cat /etc/security/limits.conf|grep '* hard nofile 10240000'|wc -l) -eq 0 ]&&echo '* hard nofile 10240000' >>/etc/security/limits.conf

# 8. disable selinux
sed -i '/SELINUX=/s/enforcing/disabled/' /etc/selinux/config

# 9. disable the swap partition
/usr/sbin/swapoff -a
yes | cp /etc/fstab /etc/fstab_bak
cat /etc/fstab_bak |grep -v swap > /etc/fstab

# 10. disable firewalld
systemctl stop firewalld
systemctl disable firewalld

# 11. reset iptables
yum install -y iptables-services
/usr/sbin/iptables -P FORWARD ACCEPT
/usr/sbin/iptables -X
/usr/sbin/iptables -F -t nat
/usr/sbin/iptables -X -t nat


# 12. add the Aliyun Kubernetes yum repository
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF


# 13. kubernetes-specific kernel parameters
cat > /etc/sysctl.d/k8s.conf <<EOF
# disable IPv6
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
# how long a neighbor (ARP) entry may be stale before it is rechecked
net.ipv4.neigh.default.gc_stale_time = 120
# disable reverse-path filtering
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
# always use the best local address for ARP requests
net.ipv4.conf.default.arp_announce = 2
net.ipv4.conf.lo.arp_announce = 2
net.ipv4.conf.all.arp_announce = 2
# enable IP forwarding
net.ipv4.ip_forward = 1
# maximum number of TIME_WAIT sockets held at the same time
net.ipv4.tcp_max_tw_buckets = 5000
# enable SYN cookies when the SYN backlog overflows
net.ipv4.tcp_syncookies = 1
# maximum length of the SYN backlog queue
net.ipv4.tcp_max_syn_backlog = 1024
# SYN+ACK retransmission attempts for active TCP connections
net.ipv4.tcp_synack_retries = 2
# make iptables process bridged traffic
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-arptables = 1
# maximum number of tracked connections
net.netfilter.nf_conntrack_max = 2310720
# inotify watches a single user may register
fs.inotify.max_user_watches = 89100
# allow mounts to be detached while in use
fs.may_detach_mounts = 1
# system-wide limit on open file handles
fs.file-max = 52706963
# per-process limit on open files
fs.nr_open = 52706963
# let the kernel overcommit memory regardless of current state
vm.overcommit_memory = 1
# do not panic on OOM; let the OOM killer act
vm.panic_on_oom = 0
# avoid swapping
vm.swappiness = 0
# keepalive tuning: fixes long-connection timeouts in IPVS mode (must be below 900)
net.ipv4.tcp_keepalive_time = 600
# interval between keepalive probes when unacknowledged
net.ipv4.tcp_keepalive_intvl = 30
# keepalive probes sent before a connection is declared dead
net.ipv4.tcp_keepalive_probes = 10
# maximum number of memory map areas a process may own
vm.max_map_count = 524288
EOF

sysctl --system

reboot
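After the reboot, it is worth confirming that the modules and kernel parameters actually took effect. A quick check, using only the names configured above:

# br_netfilter and the ip_vs modules must be loaded (re-run the scripts
# in /etc/sysconfig/modules/ if they are not)
lsmod | grep -e br_netfilter -e ip_vs -e nf_conntrack_ipv4

# each of these should print 1
sysctl -n net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables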

Step 2: Initialize Docker

Create init-docker.sh

#!/bin/bash
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo && yum makecache && yum -y install docker-ce-18.06.3.ce


systemctl daemon-reload
systemctl enable docker.service
systemctl start docker.service

sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
    "log-driver": "json-file",
    "exec-opts": ["native.cgroupdriver=systemd"],
    "log-opts": {
    "max-size": "100m",
    "max-file": "3"
    },
    "live-restore": true,
    "max-concurrent-downloads": 10,
    "max-concurrent-uploads": 10,
    "registry-mirrors": ["https://3clvvukq.mirror.aliyuncs.com"],
    "storage-driver": "overlay2",
    "storage-opts": [
    "overlay2.override_kernel_check=true"
    ]
}
EOF
sudo systemctl daemon-reload && sudo systemctl restart docker
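Before moving on, verify that Docker picked up daemon.json; the cgroup driver must be systemd to match what kubelet expects:

docker info | grep -i -e "cgroup driver" -e "storage driver"   # expect systemd and overlay2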

Step 3: Set up etcd

1. Create the CA certificate with init-etcd-ca-1.sh (run only on the first machine). cfssl download: https://download.csdn.net/download/hao20863/63222600

#!/bin/bash

# 1. download cfssl related files.
# while true;
# do
#         echo "Download cfssl, please wait a monment." &&\
#         curl -L -C - -O https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 && \
#         curl -L -C - -O https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 && \
#         curl -L -C - -O https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
#         if [ $? -eq 0 ];then
#                 echo "cfssl download success."
#                 break
#         else
#                 echo "cfssl download failed."
#                 break
#         fi
# done

# 2. Create a binary directory to store Kubernetes-related files.
if [ ! -d /usr/kubernetes/bin/ ];then
        mkdir -p /usr/kubernetes/bin/
fi

# 3. Move the cfssl binaries into the directory created above.
mv cfssl /usr/kubernetes/bin/
mv cfssljson /usr/kubernetes/bin/
mv cfssl-certinfo /usr/kubernetes/bin/
chmod +x /usr/kubernetes/bin/{cfssl,cfssljson,cfssl-certinfo}

# 4. add environment variables
[ $(cat /etc/profile|grep 'PATH=/usr/kubernetes/bin'|wc -l ) -eq 0 ] && echo 'PATH=/usr/kubernetes/bin:$PATH' >>/etc/profile && source /etc/profile || source /etc/profile

# 5. create a CA certificate directory and access this directory
CA_SSL=/etc/kubernetes/ssl/ca
[ ! -d ${CA_SSL} ] && mkdir -p ${CA_SSL}
cd $CA_SSL

## cfssl print-defaults config > config.json
## cfssl print-defaults csr > csr.json
# We do not use the two commands above to generate these files.

# Multiple profiles can be defined, each with its own expiry, usage scenarios, and other parameters; a profile is selected later when signing certificates.
# signing: this certificate can be used to sign other certificates; the generated ca.pem has CA=TRUE.
# server auth: a client can use this CA to verify certificates presented by a server.
# client auth: a server can use this CA to verify certificates presented by a client.
cat > ${CA_SSL}/ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
         "expiry": "87600h",
         "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ]
      }
    }
  }
}
EOF

# CN: Common Name. kube-apiserver extracts this field from a certificate as the requesting user name; browsers use it to verify a site's legitimacy.
# O: Organization. kube-apiserver extracts this field as the group the requesting user belongs to.

cat > ${CA_SSL}/ca-csr.json <<EOF
{
    "CN": "etcd CA",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
            "O": "k8s",
           "OU": "System"
        }
    ]
}
EOF

# 6. generate ca.pem, ca-key.pem
cfssl gencert -initca ca-csr.json | cfssljson -bare ca

[ $? -eq 0 ] && echo "CA certificate and private key generated successfully." || echo "CA certificate and private key generation failed."

2. Create init-etcd-ca-2.sh (remember to change the IPs in hosts); run only on the first machine.

#!/bin/bash

# 1. Create the CSR file.
source /etc/profile


ETCD_SSL="/etc/kubernetes/ssl/etcd/"

[ ! -d ${ETCD_SSL} ] && mkdir ${ETCD_SSL}
cat >$ETCD_SSL/etcd-csr.json << EOF
{
    "CN": "etcd",
    "hosts": [
    "192.168.66.10",
    "192.168.66.11",
    "192.168.66.20"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Beijing",
            "ST": "Beijing",
           "O": "k8s",
           "OU": "System"
        }
    ]
}
EOF

# 2. Make sure the required CA files exist.
[ ! -f /etc/kubernetes/ssl/ca/ca.pem ] && echo "no ca.pem file." && exit 1
[ ! -f /etc/kubernetes/ssl/ca/ca-key.pem ] && echo "no ca-key.pem file" && exit 1
[ ! -f /etc/kubernetes/ssl/ca/ca-config.json ] && echo "no ca-config.json file" && exit 1

# 3. Generate the etcd certificate and private key.
cd $ETCD_SSL
cfssl gencert -ca=/etc/kubernetes/ssl/ca/ca.pem \
  -ca-key=/etc/kubernetes/ssl/ca/ca-key.pem \
  -config=/etc/kubernetes/ssl/ca/ca-config.json \
  -profile=kubernetes etcd-csr.json | cfssljson -bare etcd

[ $? -eq 0 ] && echo "Etcd certificate and private key generated successfully." || echo "Etcd certificate and private key generation failed."

3. Copy the certificates to the other nodes (run only on the first machine)

#!/bin/bash
scp -r /etc/kubernetes 192.168.66.11:/etc/
scp -r /etc/kubernetes 192.168.66.20:/etc/

4. Create init-etcd.sh (remember to change the IPs in dict); run on every machine.

#!/bin/bash

# 1. env info
source /etc/profile
declare -A dict

dict=(['etcd01']=192.168.66.10 ['etcd02']=192.168.66.11 ['etcd03']=192.168.66.20)
IP=`ip a |grep -w inet|grep -v 127.0.0.1|grep -v 172.17|gawk -F/ '{print $1}'|gawk '{print $NF}'`
#IP=`ip a |grep inet|grep -v 127.0.0.1|gawk -F/ '{print $1}'|gawk '{print $NF}'`

for key in $(echo ${!dict[*]})
do
    if [[ "$IP" == "${dict[$key]}" ]];then
        LOCALIP=$IP
        LOCAL_ETCD_NAME=$key
    fi
done

if [[ "$LOCALIP" == "" || "$LOCAL_ETCD_NAME" == "" ]];then
    echo "Get localhost IP failed." && exit 1
fi

# 2. Download the etcd release tarball and extract it.
CURRENT_DIR=`pwd`
cd $CURRENT_DIR
# curl -L -C - -O https://github.com/etcd-io/etcd/releases/download/v3.3.18/etcd-v3.3.18-linux-amd64.tar.gz
#( [ $? -eq 0 ] && echo "etcd source code download success." ) || ( echo "etcd source code download failed." && exit 1 )

/usr/bin/tar -zxf etcd-v3.3.18-linux-amd64.tar.gz
cp etcd-v3.3.18-linux-amd64/etc* /usr/local/bin/
#rm -rf etcd-v3.3.18-linux-amd64*

# 3. deploy etcd config and enable etcd.service.

ETCD_SSL="/etc/kubernetes/ssl/etcd/"
ETCD_CONF=/etc/etcd/etcd.conf
ETCD_SERVICE=/usr/lib/systemd/system/etcd.service

[ ! -d /data/etcd/ ] && mkdir -p /data/etcd/
[ ! -d /etc/etcd/ ] && mkdir -p /etc/etcd/

# 3.1 Create the /etc/etcd/etcd.conf configuration file.
cat > $ETCD_CONF << EOF
#[Member]
ETCD_NAME="${LOCAL_ETCD_NAME}"
ETCD_DATA_DIR="/data/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://${LOCALIP}:2380"
ETCD_LISTEN_CLIENT_URLS="https://${LOCALIP}:2379"
ETCD_LISTEN_CLIENT_URLS2="http://127.0.0.1:2379"

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://${LOCALIP}:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://${LOCALIP}:2379"
ETCD_INITIAL_CLUSTER="etcd01=https://${dict['etcd01']}:2380,etcd02=https://${dict['etcd02']}:2380,etcd03=https://${dict['etcd03']}:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF

# 3.2 create etcd.service
cat>$ETCD_SERVICE<<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target


[Service]
Type=notify
EnvironmentFile=$ETCD_CONF
ExecStart=/usr/local/bin/etcd \
--name=\${ETCD_NAME} \
--data-dir=\${ETCD_DATA_DIR} \
--listen-peer-urls=\${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=\${ETCD_LISTEN_CLIENT_URLS},\${ETCD_LISTEN_CLIENT_URLS2} \
--advertise-client-urls=\${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=\${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=\${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=\${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=new \
--cert-file=/etc/kubernetes/ssl/etcd/etcd.pem \
--key-file=/etc/kubernetes/ssl/etcd/etcd-key.pem \
--peer-cert-file=/etc/kubernetes/ssl/etcd/etcd.pem \
--peer-key-file=/etc/kubernetes/ssl/etcd/etcd-key.pem \
--trusted-ca-file=/etc/kubernetes/ssl/ca/ca.pem \
--peer-trusted-ca-file=/etc/kubernetes/ssl/ca/ca.pem
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

# 4. enable etcd.service and start
systemctl daemon-reload
systemctl enable etcd.service
systemctl start etcd.service
systemctl status etcd.service

5. Check that the etcd cluster is healthy (remember to change the IPs in dict)

#!/bin/bash
declare -A dict
dict=(['etcd01']=192.168.66.10 ['etcd02']=192.168.66.11 ['etcd03']=192.168.66.20)

cd /usr/local/bin
etcdctl --ca-file=/etc/kubernetes/ssl/ca/ca.pem \
--cert-file=/etc/kubernetes/ssl/etcd/etcd.pem \
--key-file=/etc/kubernetes/ssl/etcd/etcd-key.pem \
--endpoints="https://${dict['etcd01']}:2379,https://${dict['etcd02']}:2379,https://${dict['etcd03']}:2379" cluster-health

etcdctl --ca-file=/etc/kubernetes/ssl/ca/ca.pem \
--cert-file=/etc/kubernetes/ssl/etcd/etcd.pem \
--key-file=/etc/kubernetes/ssl/etcd/etcd-key.pem \
--endpoints="https://${dict['etcd01']}:2379,https://${dict['etcd02']}:2379,https://${dict['etcd03']}:2379" member list
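The commands above use etcdctl's v2 API, which is the default in etcd 3.3. Kubernetes talks to etcd over the v3 API, so checking health through v3 as well is a useful extra step; a sketch that can be appended to the check script above, using the same certificate paths:

ETCDCTL_API=3 etcdctl \
  --cacert=/etc/kubernetes/ssl/ca/ca.pem \
  --cert=/etc/kubernetes/ssl/etcd/etcd.pem \
  --key=/etc/kubernetes/ssl/etcd/etcd-key.pem \
  --endpoints="https://${dict['etcd01']}:2379,https://${dict['etcd02']}:2379,https://${dict['etcd03']}:2379" \
  endpoint health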

Step 4: Configure hostnames and hosts entries

Pitfall: errors occur when the hostname does not match DNS naming rules; hostnames must not contain special characters other than '-' and '.'.

hostnamectl set-hostname  master1
hostnamectl set-hostname  node1
hostnamectl set-hostname  node2
cat >> /etc/hosts <<EOF
192.168.66.100  master1
192.168.66.101  node1
192.168.66.102  node2
EOF
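The same entries must be present on every node. A small sketch to push the file out, assuming root SSH access to the two node IPs above:

for ip in 192.168.66.101 192.168.66.102; do
    scp /etc/hosts ${ip}:/etc/hosts
done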

Step 5: Configure Keepalived and HAProxy (run on the master nodes)

1. Create start-haproxy.sh

#!/bin/bash

# Change these to your own master addresses
MasterIP1=192.168.66.10
MasterIP2=192.168.66.11
# Default kube-apiserver port; no need to change
MasterPort=6443

# The container publishes HAProxy's port 6444
docker run -d --restart=always --name HAProxy-K8S -p 6444:6444 \
        -e MasterIP1=$MasterIP1 \
        -e MasterIP2=$MasterIP2 \
        -e MasterPort=$MasterPort \
        wise2c/haproxy-k8s
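Once the container is up, confirm that HAProxy is actually listening on 6444 before relying on it:

docker ps | grep HAProxy-K8S       # container should be Up
ss -tlnp | grep 6444               # published port should be listening
docker logs HAProxy-K8S            # inspect on failure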

2. Create start-keepalived.sh (change VIRTUAL_IP and INTERFACE)

#!/bin/bash
# Change to your own virtual IP address
VIRTUAL_IP=192.168.66.200
# Network interface the VIP is bound to
INTERFACE=ens33
# Netmask bits of the VIP
NETMASK_BIT=24
# Port exposed by HAProxy, which forwards to kube-apiserver's 6443
CHECK_PORT=6444
# Router ID
RID=10
# Virtual router ID
VRID=160
# IPv4 multicast address, default 224.0.0.18
MCAST_GROUP=224.0.0.18

docker run -itd --restart=always --name=Keepalived-K8S \
        --net=host --cap-add=NET_ADMIN \
        -e VIRTUAL_IP=$VIRTUAL_IP \
        -e INTERFACE=$INTERFACE \
        -e CHECK_PORT=$CHECK_PORT \
        -e RID=$RID \
        -e VRID=$VRID \
        -e NETMASK_BIT=$NETMASK_BIT \
        -e MCAST_GROUP=$MCAST_GROUP \
        wise2c/keepalived-k8s
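With both containers running on each master, the VIP should be bound on exactly one of them (the current keepalived MASTER). A quick check, using the values configured above:

ip addr show ens33 | grep 192.168.66.200   # present only on the MASTER node
ping -c 3 192.168.66.200                   # must be reachable from every node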

Step 6: Install kubeadm and initialize the cluster

1. Configure the Aliyun Kubernetes yum repository

cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

2. Install kubeadm, kubectl, and kubelet (run on every machine)

yum install -y \
    kubeadm-1.18.2 \
    kubectl-1.18.2 \
    kubelet-1.18.2 \
    --disableexcludes=kubernetes && \
    systemctl enable kubelet

3. Modify the kubelet config so kubelet does not fail if swap is still enabled

# Edit /etc/sysconfig/kubelet and set:
# KUBELET_EXTRA_ARGS="--fail-swap-on=false"

4. Enable kubelet at boot

systemctl enable kubelet.service

5. Create kubeadm-config.yaml (change controlPlaneEndpoint and the etcd addresses)

apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
#  advertiseAddress: 192.168.66.10   # this machine's IP
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: master1  # this machine's hostname
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "192.168.66.200:6444"  # the VIP address
controllerManager: {}
dns:
  type: CoreDNS
etcd:
#  local:
#    dataDir: /var/lib/etcd
  external:
    endpoints:
    - https://192.168.66.10:2379
    - https://192.168.66.11:2379
    - https://192.168.66.20:2379
    caFile: /etc/kubernetes/ssl/ca/ca.pem
    certFile: /etc/kubernetes/ssl/etcd/etcd.pem
    keyFile: /etc/kubernetes/ssl/etcd/etcd-key.pem
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers # Google's registry is unreachable from mainland China, so use the Aliyun mirror
kind: ClusterConfiguration
kubernetesVersion: v1.18.2   # must match the installed kubeadm/kubelet version
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16 # pod network CIDR; must match the CIDR configured in calico.yaml
  serviceSubnet: 10.1.0.0/12
scheduler: {}
---
# Enable IPVS mode
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs

6. Run

kubeadm config images pull --config kubeadm-config.yaml

kubeadm init --config=kubeadm-config.yaml --v=5  | tee kubeadm-init.log
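If init succeeds, kubeadm prints follow-up instructions; the kubeconfig setup it asks for is:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config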

7. Copy the pki files to all other master nodes


ssh 192.168.66.11 "mkdir -p /etc/kubernetes/pki"
scp /etc/kubernetes/pki/{ca.crt,ca.key,sa.key,sa.pub,front-proxy-ca.crt,front-proxy-ca.key} 192.168.66.11:/etc/kubernetes/pki/
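Then, on the second master, run the join command printed in kubeadm-init.log, adding --control-plane so it joins as a control-plane node. A sketch; the token matches kubeadm-config.yaml, and the hash placeholder must be taken from that log:

kubeadm join 192.168.66.200:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:<hash-from-kubeadm-init.log> \
    --control-plane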

8. Configure calico.yaml

Download: https://download.csdn.net/download/hao20863/63221072

kubectl apply -f calico.yaml
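Finally, confirm the nodes become Ready once the Calico pods are running, and that kube-proxy really started in IPVS mode:

kubectl get nodes -o wide
kubectl get pods -n kube-system
ipvsadm -Ln                        # IPVS virtual servers appear once kube-proxy runs in ipvs mode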

Full scripts download: https://download.csdn.net/download/hao20863/63233077
