k8s --- High-Availability Cluster Setup

1upgrade_the_kernel_allnode.sh

#!/bin/bash

if [ $UID -ne 0 ]; then
  echo "This script must be run as root"
  exit 1
fi

echo "-----------------升级kernel-----------------------"
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
#yum --disablerepo="*" --enablerepo="elrepo-kernel" list available
yum --enablerepo=elrepo-kernel install kernel-lt -y
rpm -qa | grep 'kernel.*-3' | awk '{print "yum remove -y "$1}' | sh -x
awk -F\' '$1=="menuentry " {print $2}' /etc/grub2.cfg
sed -i "s/GRUB_DEFAULT=.*/GRUB_DEFAULT=0/g" /etc/default/grub
sed -i "s/GRUB_TIMEOUT=5/GRUB_TIMEOUT=0/g" /etc/default/grub

grep numa=off /etc/default/grub 
if [ $? -ne 0 ]; then  
  grep -n GRUB_CMDLINE_LINUX=\".*\" /etc/default/grub | awk -F '[:\"]' '{print "sed -i \""$1"c GRUB_CMDLINE_LINUX=\\\""$3" numa=off\\\"\" /etc/default/grub"}' | sh -x
fi

grub2-mkconfig -o /boot/grub2/grub.cfg
echo "正在重启。。。"
reboot

#On the first run, the kernel that is currently running cannot be removed.
#After the reboot the new kernel should be running.
#The old kernel can then be removed manually, or simply run this script once more to remove it.
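After the reboot, a quick sanity check before rerunning the script (a minimal sketch; the exact kernel-lt version will vary):

# The running kernel should now be the elrepo kernel-lt (4.x) version
uname -r
# Any kernel-3.* packages still listed here are removed by the second run
rpm -qa | grep '^kernel'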

 

 

2setup_base_env_allnode.sh

#!/bin/bash

if [ $UID -ne 0 ]; then
  echo "This script must be run as root"
  exit 1
fi

#yum repositories
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
rm -rf /var/cache/yum && yum makecache && yum -y update && yum -y autoremove

#time synchronization
timedatectl set-timezone Asia/Shanghai
timedatectl set-local-rtc 0
yum install -y ntp ntpdate
ntpdate -u ntp.api.bz
systemctl enable ntpd
systemctl restart ntpd
systemctl restart rsyslog
systemctl restart crond

#firewall and iptables
systemctl stop firewalld 
systemctl disable firewalld
iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat && iptables -P FORWARD ACCEPT

#disable SELinux
setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config

#disable swap
swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

#max open file handles
ulimit -n 65535

#enable IPVS kernel modules
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
modprobe -- br_netfilter
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules 
bash /etc/sysconfig/modules/ipvs.modules

#kernel parameters
cat << EOF | tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.ipv6.conf.default.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
sysctl -p /etc/sysctl.d/k8s.conf

#journald settings
mkdir /var/log/journal
mkdir /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# Persist logs to disk
Storage=persistent

# Compress historical logs
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000

# Cap total disk usage at 10G
SystemMaxUse=10G

# Cap each log file at 200M
SystemMaxFileSize=200M

# Keep logs for 2 weeks
MaxRetentionSec=2week

# Do not forward logs to syslog
ForwardToSyslog=no
EOF
systemctl restart systemd-journald

#stop unneeded services
systemctl stop postfix 
systemctl disable postfix


yum install -y epel-release
yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget lsof telnet vim net-tools
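A quick check that the IPVS modules and kernel parameters took effect (a minimal sketch):

# the ip_vs and nf_conntrack modules should all be listed
lsmod | grep -e ip_vs -e nf_conntrack
# both settings should report 1
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward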

 

 

3setup_ssh.sh

#!/bin/bash

if [ $UID -ne 0 ]; then
  echo "This script must be run as root"
  exit 1
fi

source ./configrc

for idx in ${!NODE_HOSTNAMES[@]}
do
  _name=${NODE_HOSTNAMES[$idx]} 
  _ip=${NODE_IPS[$idx]} 

  echo "$_ip $_name" >> /etc/hosts
done

ssh-keygen -t rsa
for idx in ${!NODE_HOSTNAMES[@]}
do
  _name=${NODE_HOSTNAMES[$idx]} 
  echo "ssh-copy-id root@$_name" | sh -x
done
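The ssh-keygen and ssh-copy-id calls above prompt interactively (key path, passphrase, and each node's root password). A non-interactive variant of the key generation step, assuming no key exists yet at ~/.ssh/id_rsa:

ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa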

 

4setup_ca_certs.sh

#!/bin/bash

#Run this on one node only

if [ $UID -ne 0 ]; then
  echo "This script must be run as root"
  exit 1
fi

source ./configrc
mkdir -p /opt/k8s/{bin,cert,work}

#---------------download the cfssl tools--------------------
if [ ! -f "/opt/k8s/bin/cfssl" ];then
  cd /opt/k8s
  wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
  mv /opt/k8s/cfssl_linux-amd64 /opt/k8s/bin/cfssl

  wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
  mv /opt/k8s/cfssljson_linux-amd64 /opt/k8s/bin/cfssljson

  wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
  mv /opt/k8s/cfssl-certinfo_linux-amd64 /opt/k8s/bin/cfssl-certinfo
fi

chmod +x /opt/k8s/bin/*
export PATH=/opt/k8s/bin:$PATH

#---------------generate the CA certificate--------------------
cd /opt/k8s/work
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}
EOF

cd /opt/k8s/work
cat > ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "4Paradigm"
    }
  ],
  "ca": {
    "expiry": "876000h"
  }
}
EOF

cd /opt/k8s/work
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
ls ca*

#distribute to all nodes (masters and workers)
cd /opt/k8s/work
for ip in ${NODE_IPS[@]}
do
  echo ">>> ${ip}"
  ssh root@${ip} "rm -rf /etc/kubernetes"
  ssh root@${ip} "mkdir -p /etc/kubernetes/cert"
  scp ca*.pem ca-config.json root@${ip}:/etc/kubernetes/cert
done
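The generated CA can be inspected with the cfssl-certinfo tool downloaded above, e.g. to confirm the CN and expiry before distributing it:

cfssl-certinfo -cert /opt/k8s/work/ca.pem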

 

5setup_kubectl.sh

#!/bin/bash

#Run this on one node only

if [ $UID -ne 0 ]; then
  echo "This script must be run as root"
  exit 1
fi

source ./configrc

if [ ! -f "/opt/k8s/work/kubernetes-client-linux-amd64.tar.gz" ];then
  echo "需要先上传文件/opt/k8s/work/kubernetes-client-linux-amd64.tar.gz"
  exit 0
fi

cd /opt/k8s/work
tar -xzvf kubernetes-client-linux-amd64.tar.gz

for node_ip in ${MASTER_IPS[@]}
  do
    echo ">>> ${node_ip}"
    ssh root@${node_ip} "mkdir -p /opt/k8s/bin"
    scp kubernetes/client/bin/kubectl root@${node_ip}:/opt/k8s/bin/
    ssh root@${node_ip} "chmod +x /opt/k8s/bin/*"
  done

cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "system:masters",
      "OU": "4Paradigm"
    }
  ]
}
EOF

cd /opt/k8s/work
cfssl gencert -ca=/opt/k8s/work/ca.pem \
  -ca-key=/opt/k8s/work/ca-key.pem \
  -config=/opt/k8s/work/ca-config.json \
  -profile=kubernetes admin-csr.json | cfssljson -bare admin
ls admin*

#set cluster parameters
kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/k8s/work/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kubectl.kubeconfig

#set client authentication parameters
kubectl config set-credentials admin \
  --client-certificate=/opt/k8s/work/admin.pem \
  --client-key=/opt/k8s/work/admin-key.pem \
  --embed-certs=true \
  --kubeconfig=kubectl.kubeconfig

#set context parameters
kubectl config set-context kubernetes \
  --cluster=kubernetes \
  --user=admin \
  --kubeconfig=kubectl.kubeconfig

#set the default context
kubectl config use-context kubernetes --kubeconfig=kubectl.kubeconfig

#distribute to all nodes
cd /opt/k8s/work
for node_ip in ${NODE_IPS[@]}
do
  echo ">>> ${node_ip}"
  ssh root@${node_ip} "mkdir -p ~/.kube"
  scp kubectl.kubeconfig root@${node_ip}:~/.kube/config
done
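To confirm the distributed kubeconfig is self-contained (the CA and client certificates are embedded), a quick check:

kubectl config view --kubeconfig=/opt/k8s/work/kubectl.kubeconfig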

 

6setup_etcd.sh

#!/bin/bash

#Run this on one node only

if [ $UID -ne 0 ]; then
  echo "This script must be run as root"
  exit 1
fi

source ./configrc

if [ ! -f "/opt/k8s/work/etcd-v3.3.13-linux-amd64.tar.gz" ];then
  echo "需要先上传相关二进制文件" 
  exit 0
fi

cd /opt/k8s/work
tar -zxvf etcd-v3.3.13-linux-amd64.tar.gz

echo "----------分发binary文件-----------"
for ip in ${MASTER_IPS[@]}
  do
    echo ">>> ${ip}"
    ssh root@${ip} "mkdir -p /opt/k8s/bin"
    scp etcd-v3.3.13-linux-amd64/etcd* root@${ip}:/opt/k8s/bin
    ssh root@${ip} "chmod +x /opt/k8s/bin/*"
  done

cd /opt/k8s/work
cat > etcd-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
EOF

for ip in ${MASTER_IPS[@]}
  do
    echo "\"${ip}\"," >> etcd-csr.json 
  done

cat >> etcd-csr.json <<EOF
    "127.0.0.1"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "4Paradigm"
    }
  ]
}
EOF

cd /opt/k8s/work
cfssl gencert -ca=/opt/k8s/work/ca.pem \
    -ca-key=/opt/k8s/work/ca-key.pem \
    -config=/opt/k8s/work/ca-config.json \
    -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
ls etcd*pem

cd /opt/k8s/work
for ip in ${MASTER_IPS[@]}
  do
    echo ">>> ${ip}"
    ssh root@${ip} "mkdir -p /etc/etcd/cert"
    scp etcd*.pem root@${ip}:/etc/etcd/cert/
  done

cat > etcd.service.template <<EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory=${ETCD_DATA_DIR}
ExecStart=/opt/k8s/bin/etcd \\
  --data-dir=${ETCD_DATA_DIR} \\
  --wal-dir=${ETCD_WAL_DIR} \\
  --name=##NODE_NAME## \\
  --cert-file=/etc/etcd/cert/etcd.pem \\
  --key-file=/etc/etcd/cert/etcd-key.pem \\
  --trusted-ca-file=/etc/kubernetes/cert/ca.pem \\
  --peer-cert-file=/etc/etcd/cert/etcd.pem \\
  --peer-key-file=/etc/etcd/cert/etcd-key.pem \\
  --peer-trusted-ca-file=/etc/kubernetes/cert/ca.pem \\
  --peer-client-cert-auth \\
  --client-cert-auth \\
  --listen-peer-urls=https://0.0.0.0:2380 \\
  --listen-client-urls=https://0.0.0.0:2379 \\
  --initial-advertise-peer-urls=https://##NODE_IP##:2380 \\
  --advertise-client-urls=https://##NODE_IP##:2379 \\
  --initial-cluster-token=etcd-cluster-0 \\
  --initial-cluster=${ETCD_NODES} \\
  --initial-cluster-state=new \\
  --auto-compaction-mode=periodic \\
  --auto-compaction-retention=1 \\
  --max-request-bytes=33554432 \\
  --quota-backend-bytes=6442450944 \\
  --heartbeat-interval=250 \\
  --election-timeout=2000
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

for idx in ${!MASTER_IPS[@]}
do
  _name=${MASTER_HOSTNAMES[$idx]}
  _ip=${MASTER_IPS[$idx]} 
  sed -e "s/##NODE_NAME##/$_name/" -e "s/##NODE_IP##/$_ip/" etcd.service.template > etcd-$_ip.service
done

echo "----------分发启动文件&&启动-----------"
for ip in ${MASTER_IPS[@]}
  do
    echo ">>> ${ip}"
    scp etcd-${ip}.service root@${ip}:/etc/systemd/system/etcd.service
    ssh root@${ip} "rm -rf ${ETCD_DATA_DIR} ${ETCD_WAL_DIR}"
    ssh root@${ip} "mkdir -p ${ETCD_DATA_DIR} ${ETCD_WAL_DIR}"
    ssh root@${ip} "systemctl daemon-reload && systemctl enable etcd && systemctl restart etcd " &
  done

echo "等待10秒..."
sleep 10

echo "----------查询端点健康情况-----------"
for ip in ${MASTER_IPS[@]}
do
  echo ">>> ${ip}"
  ssh root@${ip} "systemctl status etcd|grep Active"

  ETCDCTL_API=3 /opt/k8s/bin/etcdctl \
  --endpoints=https://${ip}:2379 \
  --cacert=/etc/kubernetes/cert/ca.pem \
  --cert=/etc/etcd/cert/etcd.pem \
  --key=/etc/etcd/cert/etcd-key.pem endpoint health
done

echo "----------查询leader选取情况-----------"
ETCDCTL_API=3 /opt/k8s/bin/etcdctl \
  -w table --cacert=/etc/kubernetes/cert/ca.pem \
  --cert=/etc/etcd/cert/etcd.pem \
  --key=/etc/etcd/cert/etcd-key.pem \
  --endpoints=${ETCD_ENDPOINTS} endpoint status
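Cluster membership can be listed the same way (a sketch reusing the etcd client certificate):

ETCDCTL_API=3 /opt/k8s/bin/etcdctl \
  -w table --cacert=/etc/kubernetes/cert/ca.pem \
  --cert=/etc/etcd/cert/etcd.pem \
  --key=/etc/etcd/cert/etcd-key.pem \
  --endpoints=${ETCD_ENDPOINTS} member list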

 

7setup_flannel.sh

#!/bin/bash

#Run this on one node only

if [ $UID -ne 0 ]; then
  echo "This script must be run as root"
  exit 1
fi

source ./configrc

if [ ! -f "/opt/k8s/work/flannel-v0.11.0-linux-amd64.tar.gz" ];then
  echo "需要先上传相关二进制文件" 
  exit 0
fi

cd /opt/k8s/work
mkdir flannel

tar -xzvf flannel-v0.11.0-linux-amd64.tar.gz -C flannel

echo "----------分发binary文件-----------"
for ip in ${NODE_IPS[@]}
  do
    echo ">>> ${ip}"
    ssh root@${ip} "mkdir -p /opt/k8s/bin"
    scp flannel/{flanneld,mk-docker-opts.sh} root@${ip}:/opt/k8s/bin/
    ssh root@${ip} "chmod +x /opt/k8s/bin/*"
  done


cd /opt/k8s/work
cat > flanneld-csr.json <<EOF
{
  "CN": "flanneld",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "4Paradigm"
    }
  ]
}
EOF

cfssl gencert -ca=/opt/k8s/work/ca.pem \
  -ca-key=/opt/k8s/work/ca-key.pem \
  -config=/opt/k8s/work/ca-config.json \
  -profile=kubernetes flanneld-csr.json | cfssljson -bare flanneld
ls flanneld*pem

#distribute to all nodes (masters and workers)
for ip in ${NODE_IPS[@]}
  do
    echo ">>> ${ip}"
    ssh root@${ip} "mkdir -p /etc/flanneld/cert"
    scp flanneld*.pem root@${ip}:/etc/flanneld/cert
  done

cd /opt/k8s/work
etcdctl \
  --endpoints=${ETCD_ENDPOINTS} \
  --ca-file=/opt/k8s/work/ca.pem \
  --cert-file=/opt/k8s/work/flanneld.pem \
  --key-file=/opt/k8s/work/flanneld-key.pem \
  mk ${FLANNEL_ETCD_PREFIX}/config '{"Network":"'${CLUSTER_CIDR}'", "SubnetLen": 21, "Backend": {"Type": "vxlan"}}'

cd /opt/k8s/work
cat > flanneld.service << EOF
[Unit]
Description=Flanneld overlay address etcd agent
After=network.target
After=network-online.target
Wants=network-online.target
After=etcd.service
Before=docker.service

[Service]
Type=notify
ExecStart=/opt/k8s/bin/flanneld \\
  -etcd-cafile=/etc/kubernetes/cert/ca.pem \\
  -etcd-certfile=/etc/flanneld/cert/flanneld.pem \\
  -etcd-keyfile=/etc/flanneld/cert/flanneld-key.pem \\
  -etcd-endpoints=${ETCD_ENDPOINTS} \\
  -etcd-prefix=${FLANNEL_ETCD_PREFIX} \\
  -iface=${IFACE} \\
  -ip-masq
ExecStartPost=/opt/k8s/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
Restart=always
RestartSec=5
StartLimitInterval=0

[Install]
WantedBy=multi-user.target
RequiredBy=docker.service
EOF

echo "----------分发启动文件-----------"
cd /opt/k8s/work
for ip in ${NODE_IPS[@]}
  do
    echo ">>> ${ip}"
    scp flanneld.service root@${ip}:/etc/systemd/system/
  done

echo "----------启动flannel服务-----------"
for ip in ${NODE_IPS[@]}
  do
    echo ">>> ${ip}"
    ssh root@${ip} "systemctl daemon-reload && systemctl enable flanneld && systemctl restart flanneld"
  done

echo "----------查看服务启动情况-----------"
for ip in ${NODE_IPS[@]}
  do
    echo ">>> ${ip}"
    ssh root@${ip} "systemctl status flanneld|grep Active"
  done

echo "----------查看集群 Pod 网段-----------"
source /opt/k8s/bin/environment.sh
etcdctl \
  --endpoints=${ETCD_ENDPOINTS} \
  --ca-file=/etc/kubernetes/cert/ca.pem \
  --cert-file=/etc/flanneld/cert/flanneld.pem \
  --key-file=/etc/flanneld/cert/flanneld-key.pem \
  get ${FLANNEL_ETCD_PREFIX}/config


echo "----------查看已分配的 Pod 子网段列表-----------"
etcdctl \
  --endpoints=${ETCD_ENDPOINTS} \
  --ca-file=/etc/kubernetes/cert/ca.pem \
  --cert-file=/etc/flanneld/cert/flanneld.pem \
  --key-file=/etc/flanneld/cert/flanneld-key.pem \
  ls ${FLANNEL_ETCD_PREFIX}/subnets

echo "---------验证各节点能通过 Pod 网段互通---------"
FLANNEL_IPS=$NODE_IPS
for idx in ${!NODE_IPS[@]}
do
  ip=${NODE_IPS[$idx]} 
  ssh ${ip} "/usr/sbin/ip addr show flannel.1|grep -w inet"
  FLANNEL_IPS[$idx]=`ssh ${ip} "/usr/sbin/ip addr show flannel.1|grep -w inet" | awk '{print $2}' |  awk -F/ '{print $1}'`
done


for ip in ${NODE_IPS[@]}
  do
    echo ">>> ${ip}"
    for POD_IP in ${FLANNEL_IPS[@]}
    do
      ssh $ip "ping -c 1 $POD_IP"
    done
    echo ""
    echo ""
  done
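On each node, mk-docker-opts.sh (invoked by the flanneld unit's ExecStartPost) derives Docker network options from the node's flannel subnet; a quick sketch to check the result:

for ip in ${NODE_IPS[@]}
do
  echo ">>> ${ip}"
  # DOCKER_NETWORK_OPTIONS should contain a --bip option inside this node's flannel subnet
  ssh root@${ip} "cat /run/flannel/docker"
done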

 

8setup_nginx.sh

#!/bin/bash

#Run this on one node only

if [ $UID -ne 0 ]; then
  echo "This script must be run as root"
  exit 1
fi

source ./configrc

cd /opt/k8s/work

if [ ! -f "/opt/k8s/work/nginx-1.15.3.tar.gz" ];then
  wget http://nginx.org/download/nginx-1.15.3.tar.gz
fi

rm -rf /opt/k8s/work/nginx-1.15.3
tar -xzvf nginx-1.15.3.tar.gz

yum install -y gcc-c++
cd /opt/k8s/work/nginx-1.15.3
mkdir nginx-prefix
./configure --with-stream --without-http --prefix=$(pwd)/nginx-prefix --without-http_uwsgi_module --without-http_scgi_module --without-http_fastcgi_module
make && make install

./nginx-prefix/sbin/nginx -v
ldd ./nginx-prefix/sbin/nginx

cd /opt/k8s/work
for ip in ${NODE_IPS[@]}
do
  echo ">>> ${ip}"
  ssh root@${ip} "mkdir -p /opt/k8s/kube-nginx/{conf,logs,sbin}"
  scp /opt/k8s/work/nginx-1.15.3/nginx-prefix/sbin/nginx  root@${ip}:/opt/k8s/kube-nginx/sbin/kube-nginx
  ssh root@${ip} "chmod a+x /opt/k8s/kube-nginx/sbin/*"
done

cd /opt/k8s/work
cat > kube-nginx.conf << \EOF
worker_processes 1;
events {
    worker_connections  1024;
}
stream {
    upstream backend {
        hash $remote_addr consistent;
EOF

# the upstream targets are the kube-apiserver instances, which run on the master nodes
for ip in ${MASTER_IPS[@]}
do
  echo "        server ${ip}:6443        max_fails=3 fail_timeout=30s;" >> kube-nginx.conf 
done

cat >> kube-nginx.conf << EOF
    }
    server {
        listen ${KUBE_APISERVER#*//};
        proxy_connect_timeout 1s;
        proxy_pass backend;
    }
}
EOF

cd /opt/k8s/work
for ip in ${NODE_IPS[@]}
do
  echo ">>> ${ip}"
  scp kube-nginx.conf  root@${ip}:/opt/k8s/kube-nginx/conf/kube-nginx.conf
done

cd /opt/k8s/work
cat > kube-nginx.service <<EOF
[Unit]
Description=kube-apiserver nginx proxy
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=forking
ExecStartPre=/opt/k8s/kube-nginx/sbin/kube-nginx -c /opt/k8s/kube-nginx/conf/kube-nginx.conf -p /opt/k8s/kube-nginx -t
ExecStart=/opt/k8s/kube-nginx/sbin/kube-nginx -c /opt/k8s/kube-nginx/conf/kube-nginx.conf -p /opt/k8s/kube-nginx
ExecReload=/opt/k8s/kube-nginx/sbin/kube-nginx -c /opt/k8s/kube-nginx/conf/kube-nginx.conf -p /opt/k8s/kube-nginx -s reload
PrivateTmp=true
Restart=always
RestartSec=5
StartLimitInterval=0
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

for ip in ${NODE_IPS[@]}
do
  echo ">>> ${ip}"
  scp kube-nginx.service  root@${ip}:/etc/systemd/system/
  ssh root@${ip} "systemctl daemon-reload && systemctl enable kube-nginx && systemctl restart kube-nginx"
done

for ip in ${NODE_IPS[@]}
do
  echo ">>> ${ip}"
  ssh root@${ip} "systemctl status kube-nginx |grep 'Active:'"
done
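Once kube-apiserver is running (script 9), the local proxy can be smoke-tested from any node; this is only a reachability sketch, so -k skips certificate verification and even a 401 response proves the proxy reached an apiserver:

curl -k https://127.0.0.1:8443/healthz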

 

9setup_master_api-server.sh

#!/bin/bash

#Run this on one node only

if [ $UID -ne 0 ]; then
  echo "This script must be run as root"
  exit 1
fi

source ./configrc

if [ ! -f "/opt/k8s/work/kubernetes-server-linux-amd64.tar.gz" ];then
  echo "需要先上传相关二进制文件" 
  exit 0
fi

cd /opt/k8s/work
tar -xzvf kubernetes-server-linux-amd64.tar.gz

echo "-------------分发二进制文件----------------"
for ip in ${NODE_IPS[@]}
do
  echo ">>> ${ip}"
  ssh root@${ip} "mkdir -p /opt/k8s/bin"
  scp kubernetes/server/bin/{apiextensions-apiserver,cloud-controller-manager,kube-apiserver,kube-controller-manager,kube-proxy,kube-scheduler,kubeadm,kubectl,kubelet,mounter} root@${ip}:/opt/k8s/bin/
  ssh root@${ip} "chmod +x /opt/k8s/bin/*"
done


echo "=======================api server 集群====================="
echo "-------------创建分发配置文件和证书----------------"
cd /opt/k8s/work
cat > kubernetes-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
EOF

for ip in ${MASTER_IPS[@]}
do
  echo "    \"${ip}\"," >> kubernetes-csr.json 
done

cat >> kubernetes-csr.json <<EOF
    "${CLUSTER_KUBERNETES_SVC_IP}",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local."
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "4Paradigm"
    }
  ]
}
EOF

cfssl gencert -ca=/opt/k8s/work/ca.pem \
  -ca-key=/opt/k8s/work/ca-key.pem \
  -config=/opt/k8s/work/ca-config.json \
  -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes
ls kubernetes*pem

cat > encryption-config.yaml <<EOF
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: ${ENCRYPTION_KEY}
      - identity: {}
EOF

cat > audit-policy.yaml <<EOF
apiVersion: audit.k8s.io/v1beta1
kind: Policy
rules:
  # The following requests were manually identified as high-volume and low-risk, so drop them.
  - level: None
    resources:
      - group: ""
        resources:
          - endpoints
          - services
          - services/status
    users:
      - 'system:kube-proxy'
    verbs:
      - watch

  - level: None
    resources:
      - group: ""
        resources:
          - nodes
          - nodes/status
    userGroups:
      - 'system:nodes'
    verbs:
      - get

  - level: None
    namespaces:
      - kube-system
    resources:
      - group: ""
        resources:
          - endpoints
    users:
      - 'system:kube-controller-manager'
      - 'system:kube-scheduler'
      - 'system:serviceaccount:kube-system:endpoint-controller'
    verbs:
      - get
      - update

  - level: None
    resources:
      - group: ""
        resources:
          - namespaces
          - namespaces/status
          - namespaces/finalize
    users:
      - 'system:apiserver'
    verbs:
      - get

  # Don't log HPA fetching metrics.
  - level: None
    resources:
      - group: metrics.k8s.io
    users:
      - 'system:kube-controller-manager'
    verbs:
      - get
      - list

  # Don't log these read-only URLs.
  - level: None
    nonResourceURLs:
      - '/healthz*'
      - /version
      - '/swagger*'

  # Don't log events requests.
  - level: None
    resources:
      - group: ""
        resources:
          - events

  # node and pod status calls from nodes are high-volume and can be large, don't log responses for expected updates from nodes
  - level: Request
    omitStages:
      - RequestReceived
    resources:
      - group: ""
        resources:
          - nodes/status
          - pods/status
    users:
      - kubelet
      - 'system:node-problem-detector'
      - 'system:serviceaccount:kube-system:node-problem-detector'
    verbs:
      - update
      - patch

  - level: Request
    omitStages:
      - RequestReceived
    resources:
      - group: ""
        resources:
          - nodes/status
          - pods/status
    userGroups:
      - 'system:nodes'
    verbs:
      - update
      - patch

  # deletecollection calls can be large, don't log responses for expected namespace deletions
  - level: Request
    omitStages:
      - RequestReceived
    users:
      - 'system:serviceaccount:kube-system:namespace-controller'
    verbs:
      - deletecollection

  # Secrets, ConfigMaps, and TokenReviews can contain sensitive & binary data,
  # so only log at the Metadata level.
  - level: Metadata
    omitStages:
      - RequestReceived
    resources:
      - group: ""
        resources:
          - secrets
          - configmaps
      - group: authentication.k8s.io
        resources:
          - tokenreviews
  # Get responses can be large; skip them.
  - level: Request
    omitStages:
      - RequestReceived
    resources:
      - group: ""
      - group: admissionregistration.k8s.io
      - group: apiextensions.k8s.io
      - group: apiregistration.k8s.io
      - group: apps
      - group: authentication.k8s.io
      - group: authorization.k8s.io
      - group: autoscaling
      - group: batch
      - group: certificates.k8s.io
      - group: extensions
      - group: metrics.k8s.io
      - group: networking.k8s.io
      - group: policy
      - group: rbac.authorization.k8s.io
      - group: scheduling.k8s.io
      - group: settings.k8s.io
      - group: storage.k8s.io
    verbs:
      - get
      - list
      - watch

  # Default level for known APIs
  - level: RequestResponse
    omitStages:
      - RequestReceived
    resources:
      - group: ""
      - group: admissionregistration.k8s.io
      - group: apiextensions.k8s.io
      - group: apiregistration.k8s.io
      - group: apps
      - group: authentication.k8s.io
      - group: authorization.k8s.io
      - group: autoscaling
      - group: batch
      - group: certificates.k8s.io
      - group: extensions
      - group: metrics.k8s.io
      - group: networking.k8s.io
      - group: policy
      - group: rbac.authorization.k8s.io
      - group: scheduling.k8s.io
      - group: settings.k8s.io
      - group: storage.k8s.io

  # Default level for all other requests.
  - level: Metadata
    omitStages:
      - RequestReceived
EOF


cat > proxy-client-csr.json <<EOF
{
  "CN": "aggregator",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "4Paradigm"
    }
  ]
}
EOF

cfssl gencert -ca=/etc/kubernetes/cert/ca.pem \
  -ca-key=/etc/kubernetes/cert/ca-key.pem  \
  -config=/etc/kubernetes/cert/ca-config.json  \
  -profile=kubernetes proxy-client-csr.json | cfssljson -bare proxy-client
ls proxy-client*.pem


for ip in ${MASTER_IPS[@]}
do
  echo ">>> ${ip}"
  ssh root@${ip} "mkdir -p /etc/kubernetes/cert"
  scp kubernetes*.pem root@${ip}:/etc/kubernetes/cert/
  scp encryption-config.yaml root@${ip}:/etc/kubernetes/
  scp audit-policy.yaml root@${ip}:/etc/kubernetes/audit-policy.yaml
  scp proxy-client*.pem root@${ip}:/etc/kubernetes/cert/
done


echo "-------------为各节点创建和分发 kube-apiserver systemd unit 文件----------------"
cat > kube-apiserver.service.template <<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=${K8S_DIR}/kube-apiserver
ExecStart=/opt/k8s/bin/kube-apiserver \\
  --advertise-address=##NODE_IP## \\
  --default-not-ready-toleration-seconds=360 \\
  --default-unreachable-toleration-seconds=360 \\
  --feature-gates=DynamicAuditing=true \\
  --max-mutating-requests-inflight=2000 \\
  --max-requests-inflight=4000 \\
  --default-watch-cache-size=200 \\
  --delete-collection-workers=2 \\
  --encryption-provider-config=/etc/kubernetes/encryption-config.yaml \\
  --etcd-cafile=/etc/kubernetes/cert/ca.pem \\
  --etcd-certfile=/etc/kubernetes/cert/kubernetes.pem \\
  --etcd-keyfile=/etc/kubernetes/cert/kubernetes-key.pem \\
  --etcd-servers=${ETCD_ENDPOINTS} \\
  --bind-address=##NODE_IP## \\
  --secure-port=6443 \\
  --tls-cert-file=/etc/kubernetes/cert/kubernetes.pem \\
  --tls-private-key-file=/etc/kubernetes/cert/kubernetes-key.pem \\
  --insecure-port=0 \\
  --audit-dynamic-configuration \\
  --audit-log-maxage=15 \\
  --audit-log-maxbackup=3 \\
  --audit-log-maxsize=100 \\
  --audit-log-truncate-enabled \\
  --audit-log-path=${K8S_DIR}/kube-apiserver/audit.log \\
  --audit-policy-file=/etc/kubernetes/audit-policy.yaml \\
  --profiling \\
  --anonymous-auth=false \\
  --client-ca-file=/etc/kubernetes/cert/ca.pem \\
  --enable-bootstrap-token-auth \\
  --requestheader-allowed-names="aggregator" \\
  --requestheader-client-ca-file=/etc/kubernetes/cert/ca.pem \\
  --requestheader-extra-headers-prefix="X-Remote-Extra-" \\
  --requestheader-group-headers=X-Remote-Group \\
  --requestheader-username-headers=X-Remote-User \\
  --service-account-key-file=/etc/kubernetes/cert/ca.pem \\
  --authorization-mode=Node,RBAC \\
  --runtime-config=api/all=true \\
  --enable-admission-plugins=NodeRestriction \\
  --allow-privileged=true \\
  --apiserver-count=3 \\
  --event-ttl=168h \\
  --kubelet-certificate-authority=/etc/kubernetes/cert/ca.pem \\
  --kubelet-client-certificate=/etc/kubernetes/cert/kubernetes.pem \\
  --kubelet-client-key=/etc/kubernetes/cert/kubernetes-key.pem \\
  --kubelet-https=true \\
  --kubelet-timeout=10s \\
  --proxy-client-cert-file=/etc/kubernetes/cert/proxy-client.pem \\
  --proxy-client-key-file=/etc/kubernetes/cert/proxy-client-key.pem \\
  --service-cluster-ip-range=${SERVICE_CIDR} \\
  --service-node-port-range=${NODE_PORT_RANGE} \\
  --logtostderr=true \\
  --v=2
Restart=on-failure
RestartSec=10
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

cd /opt/k8s/work
for ip in ${MASTER_IPS[@]}
do
  sed -e "s/##NODE_IP##/${ip}/" kube-apiserver.service.template > kube-apiserver-${ip}.service
done
ls kube-apiserver*.service

cd /opt/k8s/work
for ip in ${MASTER_IPS[@]}
do
  echo ">>> ${ip}"
  scp kube-apiserver-${ip}.service root@${ip}:/etc/systemd/system/kube-apiserver.service
done

echo "-------------启动 kube-apiserver 服务----------------"
for ip in ${MASTER_IPS[@]}
do
  echo ">>> ${ip}"
  ssh root@${ip} "mkdir -p ${K8S_DIR}/kube-apiserver"
  ssh root@${ip} "systemctl daemon-reload && systemctl enable kube-apiserver && systemctl restart kube-apiserver"
done

echo "-------------检查 kube-apiserver 运行状态----------------"
for ip in ${MASTER_IPS[@]}
do
  echo ">>> ${ip}"
  ssh root@${ip} "systemctl status kube-apiserver |grep 'Active:'"
done

echo "-------------打印 kube-apiserver 写入 etcd 的数据----------------"
ETCDCTL_API=3 etcdctl \
    --endpoints=${ETCD_ENDPOINTS} \
    --cacert=/opt/k8s/work/ca.pem \
    --cert=/opt/k8s/work/etcd.pem \
    --key=/opt/k8s/work/etcd-key.pem \
    get /registry/ --prefix --keys-only


echo "-------------检查集群信息----------------"
kubectl cluster-info
kubectl get all --all-namespaces
kubectl get componentstatuses

echo "-------------检查 kube-apiserver 监听的端口----------------"
sudo netstat -lnpt|grep kube

echo "-------------授予 kube-apiserver 访问 kubelet API 的权限----------------"
kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes
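Each apiserver instance can also be probed directly with the admin client certificate generated in script 5 (a sketch; a healthy instance answers "ok"):

for ip in ${MASTER_IPS[@]}
do
  echo ">>> ${ip}"
  curl -s --cacert /opt/k8s/work/ca.pem \
    --cert /opt/k8s/work/admin.pem \
    --key /opt/k8s/work/admin-key.pem \
    https://${ip}:6443/healthz
  echo ""
done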

 

10setup_master_controller-manager.sh

#!/bin/bash

#Run this on one node only

if [ $UID -ne 0 ]; then
  echo "This script must be run as root"
  exit 1
fi

source ./configrc

cd /opt/k8s/work

echo "=======================kube-controller-manager 集群====================="
echo "-------------创建分发配置文件和证书----------------"
cd /opt/k8s/work
cat > kube-controller-manager-csr.json <<EOF
{
    "CN": "system:kube-controller-manager",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "hosts": [
EOF

for ip in ${MASTER_IPS[@]}
do
  echo "    \"${ip}\"," >> kube-controller-manager-csr.json 
done

cat >> kube-controller-manager-csr.json <<EOF
      "127.0.0.1"
    ],
    "names": [
      {
        "C": "CN",
        "ST": "BeiJing",
        "L": "BeiJing",
        "O": "system:kube-controller-manager",
        "OU": "4Paradigm"
      }
    ]
}
EOF

cd /opt/k8s/work
cfssl gencert -ca=/opt/k8s/work/ca.pem \
  -ca-key=/opt/k8s/work/ca-key.pem \
  -config=/opt/k8s/work/ca-config.json \
  -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
ls kube-controller-manager*pem

cd /opt/k8s/work
kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/k8s/work/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-controller-manager.kubeconfig

kubectl config set-credentials system:kube-controller-manager \
  --client-certificate=kube-controller-manager.pem \
  --client-key=kube-controller-manager-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-controller-manager.kubeconfig

kubectl config set-context system:kube-controller-manager \
  --cluster=kubernetes \
  --user=system:kube-controller-manager \
  --kubeconfig=kube-controller-manager.kubeconfig

kubectl config use-context system:kube-controller-manager --kubeconfig=kube-controller-manager.kubeconfig

for ip in ${MASTER_IPS[@]}
do
  echo ">>> ${ip}"
  scp kube-controller-manager*.pem root@${ip}:/etc/kubernetes/cert/
  scp kube-controller-manager.kubeconfig root@${ip}:/etc/kubernetes/
done


echo "-------------创建服务配置文件,启动服务----------------"
cd /opt/k8s/work
cat > kube-controller-manager.service.template <<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
WorkingDirectory=${K8S_DIR}/kube-controller-manager
ExecStart=/opt/k8s/bin/kube-controller-manager \\
  --profiling \\
  --cluster-name=kubernetes \\
  --controllers=*,bootstrapsigner,tokencleaner \\
  --kube-api-qps=1000 \\
  --kube-api-burst=2000 \\
  --leader-elect \\
  --use-service-account-credentials=true \\
  --concurrent-service-syncs=2 \\
  --bind-address=##NODE_IP## \\
  --secure-port=10252 \\
  --tls-cert-file=/etc/kubernetes/cert/kube-controller-manager.pem \\
  --tls-private-key-file=/etc/kubernetes/cert/kube-controller-manager-key.pem \\
  --port=0 \\
  --authentication-kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \\
  --client-ca-file=/etc/kubernetes/cert/ca.pem \\
  --requestheader-allowed-names="" \\
  --requestheader-client-ca-file=/etc/kubernetes/cert/ca.pem \\
  --requestheader-extra-headers-prefix="X-Remote-Extra-" \\
  --requestheader-group-headers=X-Remote-Group \\
  --requestheader-username-headers=X-Remote-User \\
  --authorization-kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \\
  --cluster-signing-cert-file=/etc/kubernetes/cert/ca.pem \\
  --cluster-signing-key-file=/etc/kubernetes/cert/ca-key.pem \\
  --experimental-cluster-signing-duration=876000h \\
  --horizontal-pod-autoscaler-sync-period=10s \\
  --concurrent-deployment-syncs=10 \\
  --concurrent-gc-syncs=30 \\
  --node-cidr-mask-size=24 \\
  --service-cluster-ip-range=${SERVICE_CIDR} \\
  --pod-eviction-timeout=6m \\
  --terminated-pod-gc-threshold=10000 \\
  --root-ca-file=/etc/kubernetes/cert/ca.pem \\
  --service-account-private-key-file=/etc/kubernetes/cert/ca-key.pem \\
  --kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \\
  --logtostderr=true \\
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

for ip in ${MASTER_IPS[@]}
do
  sed -e "s/##NODE_IP##/${ip}/" kube-controller-manager.service.template > kube-controller-manager-${ip}.service
done
ls kube-controller-manager*.service

for ip in ${MASTER_IPS[@]}
do
  echo ">>> ${ip}"
  scp kube-controller-manager-${ip}.service root@${ip}:/etc/systemd/system/kube-controller-manager.service
done

for ip in ${MASTER_IPS[@]}
do
  echo ">>> ${ip}"
  ssh root@${ip} "mkdir -p ${K8S_DIR}/kube-controller-manager"
  ssh root@${ip} "systemctl daemon-reload && systemctl enable kube-controller-manager && systemctl restart kube-controller-manager"
done


echo "-------------检查服务运行状态----------------"
for ip in ${MASTER_IPS[@]}
do
  echo ">>> ${ip}"
  ssh root@${ip} "systemctl status kube-controller-manager|grep Active"
done

echo "-------------检查 kube-controller-manager 监听的端口----------------"
netstat -lnpt | grep kube-cont

echo "-------------查看输出的 metrics----------------"
curl -s --cacert /opt/k8s/work/ca.pem --cert /opt/k8s/work/admin.pem --key /opt/k8s/work/admin-key.pem https://`hostname`:10252/metrics |head

kubectl describe clusterrole system:kube-controller-manager
kubectl get clusterrole | grep controller
kubectl get endpoints kube-controller-manager --namespace=kube-system  -o yaml

echo "-------------检查集群信息----------------"
kubectl cluster-info
kubectl get all --all-namespaces
kubectl get componentstatuses
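Only one of the kube-controller-manager instances holds the leader lease at a time; the current holder is recorded in the control-plane.alpha.kubernetes.io/leader annotation on the endpoints object queried above, e.g.:

kubectl get endpoints kube-controller-manager --namespace=kube-system -o yaml | grep holderIdentity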

 

11setup_master_scheduler.sh

#!/bin/bash

#Run this on one node only

if [ $UID -ne 0 ]; then
  echo "This script must be run as root"
  exit 1
fi

source ./configrc

echo "=======================kube-scheduler集群====================="
echo "-------------创建分发配置文件和证书----------------"

cd /opt/k8s/work
cat > kube-scheduler-csr.json <<EOF
{
    "CN": "system:kube-scheduler",
    "hosts": [
EOF

for ip in ${MASTER_IPS[@]}
do
  echo "    \"${ip}\"," >> kube-scheduler-csr.json 
done

cat >> kube-scheduler-csr.json <<EOF
      "127.0.0.1"
    ],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
      {
        "C": "CN",
        "ST": "BeiJing",
        "L": "BeiJing",
        "O": "system:kube-scheduler",
        "OU": "4Paradigm"
      }
    ]
}
EOF

cfssl gencert -ca=/opt/k8s/work/ca.pem \
  -ca-key=/opt/k8s/work/ca-key.pem \
  -config=/opt/k8s/work/ca-config.json \
  -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
ls kube-scheduler*pem

cd /opt/k8s/work
kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/k8s/work/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-scheduler.kubeconfig

kubectl config set-credentials system:kube-scheduler \
  --client-certificate=kube-scheduler.pem \
  --client-key=kube-scheduler-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-scheduler.kubeconfig

kubectl config set-context system:kube-scheduler \
  --cluster=kubernetes \
  --user=system:kube-scheduler \
  --kubeconfig=kube-scheduler.kubeconfig

kubectl config use-context system:kube-scheduler --kubeconfig=kube-scheduler.kubeconfig

for ip in ${MASTER_IPS[@]}
do
  echo ">>> ${ip}"
  scp kube-scheduler*.pem root@${ip}:/etc/kubernetes/cert/
  scp kube-scheduler.kubeconfig root@${ip}:/etc/kubernetes/
done


cd /opt/k8s/work
cat >kube-scheduler.yaml.template <<EOF
apiVersion: kubescheduler.config.k8s.io/v1alpha1
kind: KubeSchedulerConfiguration
bindTimeoutSeconds: 600
clientConnection:
  burst: 200
  kubeconfig: "/etc/kubernetes/kube-scheduler.kubeconfig"
  qps: 100
enableContentionProfiling: false
enableProfiling: true
hardPodAffinitySymmetricWeight: 1
healthzBindAddress: ##NODE_IP##:10251
leaderElection:
  leaderElect: true
metricsBindAddress: ##NODE_IP##:10251
EOF

for ip in ${MASTER_IPS[@]}
do
  sed -e "s/##NODE_IP##/${ip}/" kube-scheduler.yaml.template > kube-scheduler-${ip}.yaml
done
ls kube-scheduler*.yaml

for ip in ${MASTER_IPS[@]}
do
  echo ">>> ${ip}"
  scp kube-scheduler-${ip}.yaml root@${ip}:/etc/kubernetes/kube-scheduler.yaml
done

cd /opt/k8s/work
cat > kube-scheduler.service.template <<EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
WorkingDirectory=${K8S_DIR}/kube-scheduler
ExecStart=/opt/k8s/bin/kube-scheduler \\
  --config=/etc/kubernetes/kube-scheduler.yaml \\
  --bind-address=##NODE_IP## \\
  --secure-port=10259 \\
  --port=0 \\
  --tls-cert-file=/etc/kubernetes/cert/kube-scheduler.pem \\
  --tls-private-key-file=/etc/kubernetes/cert/kube-scheduler-key.pem \\
  --authentication-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \\
  --client-ca-file=/etc/kubernetes/cert/ca.pem \\
  --requestheader-allowed-names="" \\
  --requestheader-client-ca-file=/etc/kubernetes/cert/ca.pem \\
  --requestheader-extra-headers-prefix="X-Remote-Extra-" \\
  --requestheader-group-headers=X-Remote-Group \\
  --requestheader-username-headers=X-Remote-User \\
  --authorization-kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig \\
  --logtostderr=true \\
  --v=2
Restart=always
RestartSec=5
StartLimitInterval=0

[Install]
WantedBy=multi-user.target
EOF

for ip in ${MASTER_IPS[@]}
do
  sed -e "s/##NODE_IP##/${ip}/" kube-scheduler.service.template > kube-scheduler-${ip}.service
done
ls kube-scheduler*.service

for ip in ${MASTER_IPS[@]}
do
  echo ">>> ${ip}"
  scp kube-scheduler-${ip}.service root@${ip}:/etc/systemd/system/kube-scheduler.service
done

for ip in ${MASTER_IPS[@]}
do
  echo ">>> ${ip}"
  ssh root@${ip} "mkdir -p ${K8S_DIR}/kube-scheduler"
  ssh root@${ip} "systemctl daemon-reload && systemctl enable kube-scheduler && systemctl restart kube-scheduler"
done

echo "-------------检查服务运行状态----------------"
for ip in ${MASTER_IPS[@]}
do
  echo ">>> ${ip}"
  ssh root@${ip} "systemctl status kube-scheduler|grep Active"
done

echo "-------------检查监听的端口----------------"
netstat -lnpt |grep kube-sch

echo "-------------查看输出的 metrics----------------"
curl -s http://127.0.0.1:10251/metrics |head
curl -s --cacert /opt/k8s/work/ca.pem --cert /opt/k8s/work/admin.pem --key /opt/k8s/work/admin-key.pem https://`hostname`:10259/metrics |head

echo "-------------检查集群信息----------------"
kubectl get endpoints kube-scheduler --namespace=kube-system  -o yaml
kubectl cluster-info
kubectl get all --all-namespaces
kubectl get componentstatuses

 

12setup_worker_docker.sh

#!/bin/bash

#Run this on one node only

if [ $UID -ne 0 ]; then
  echo "This script must be run as root"
  exit 1
fi

source ./configrc

echo "==================安装docker================="
cd /opt/k8s/work
if [ ! -f "/opt/k8s/work/docker-18.09.8.tgz" ];then
  wget https://download.docker.com/linux/static/stable/x86_64/docker-18.09.8.tgz
fi
tar -xvf docker-18.09.8.tgz

for ip in ${NODE_IPS[@]}
do
  echo ">>> ${ip}"
  ssh root@${ip} "mkdir -p /opt/k8s/bin"
  scp docker/*  root@${ip}:/opt/k8s/bin/
  ssh root@${ip} "chmod +x /opt/k8s/bin/*"
done

cd /opt/k8s/work
cat > docker.service <<"EOF"
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.io

[Service]
WorkingDirectory=##DOCKER_DIR##
Environment="PATH=/opt/k8s/bin:/bin:/sbin:/usr/bin:/usr/sbin"
EnvironmentFile=-/run/flannel/docker
ExecStart=/opt/k8s/bin/dockerd $DOCKER_NETWORK_OPTIONS
ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT
ExecReload=/bin/kill -s HUP $MAINPID
Restart=on-failure
RestartSec=5
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target
EOF
sed -i -e "s|##DOCKER_DIR##|${DOCKER_DIR}|" docker.service

cat > docker-daemon.json <<EOF
{
    "registry-mirrors": ["https://n8m3bosn.mirror.aliyuncs.com"],
    "max-concurrent-downloads": 20,
    "live-restore": true,
    "max-concurrent-uploads": 10,
    "debug": true,
    "data-root": "${DOCKER_DIR}/data",
    "exec-root": "${DOCKER_DIR}/exec",
    "log-opts": {
      "max-size": "100m",
      "max-file": "5"
    }
}
EOF

for ip in ${NODE_IPS[@]}
do
  echo ">>> ${ip}"
  scp docker.service root@${ip}:/etc/systemd/system/
  ssh root@${ip} "mkdir -p  /etc/docker/ ${DOCKER_DIR}/{data,exec}"
  scp docker-daemon.json root@${ip}:/etc/docker/daemon.json
done

for ip in ${NODE_IPS[@]}
do
  echo ">>> ${ip}"
  ssh root@${ip} "systemctl daemon-reload && systemctl enable docker && systemctl restart docker"
done

echo "---------------查看docker服务运行情况---------------"
for ip in ${NODE_IPS[@]}
do
  echo ">>> ${ip}"
  ssh root@${ip} "systemctl status docker|grep Active"
done

echo "---------------docker0网桥和flannel网桥情况---------------"
for ip in ${NODE_IPS[@]}
do
  echo ">>> ${ip}"
  ssh root@${ip} "/usr/sbin/ip addr show flannel.1 && /usr/sbin/ip addr show docker0"
done

ps -elfH|grep docker
docker info
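A minimal container smoke test, assuming the configured registry mirror is reachable; the container's eth0 address should fall inside this node's flannel subnet:

docker pull busybox
docker run --rm busybox ip addr show eth0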

 

13setup_worker_kubelet.sh

#!/bin/bash

#Run this on one node only

if [ $UID -ne 0 ]; then
  echo "This script must be run as root"
  exit 1
fi

source ./configrc

echo "==================安装kubelet================="
echo "---------------创建token-----------------"
cd /opt/k8s/work
rm -rf kubelet-bootstrap-*.kubeconfig 
for node_name in ${NODE_HOSTNAMES[@]}
do
  echo ">>> ${node_name}"

  # create a bootstrap token
  export BOOTSTRAP_TOKEN=$(kubeadm token create \
    --description kubelet-bootstrap-token \
    --groups system:bootstrappers:${node_name} \
    --kubeconfig ~/.kube/config)

  # set cluster parameters
  kubectl config set-cluster kubernetes \
    --certificate-authority=/etc/kubernetes/cert/ca.pem \
    --embed-certs=true \
    --server=${KUBE_APISERVER} \
    --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig

  # set client authentication parameters
  kubectl config set-credentials kubelet-bootstrap \
    --token=${BOOTSTRAP_TOKEN} \
    --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig

  # set context parameters
  kubectl config set-context default \
    --cluster=kubernetes \
    --user=kubelet-bootstrap \
    --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig

  # set the default context
  kubectl config use-context default --kubeconfig=kubelet-bootstrap-${node_name}.kubeconfig
done

echo "---------------查看token-----------------"
kubeadm token list --kubeconfig ~/.kube/config
kubectl get secrets  -n kube-system | grep bootstrap-token

echo "---------------分发 bootstrap kubeconfig 文件到所有 worker 节点-----------------"
for node_name in ${NODE_HOSTNAMES[@]}
do
  echo ">>> ${node_name}"
  scp kubelet-bootstrap-${node_name}.kubeconfig root@${node_name}:/etc/kubernetes/kubelet-bootstrap.kubeconfig
done


echo "---------------创建和分发 kubelet 参数配置文件-----------------"
cat > kubelet-config.yaml.template <<EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: "##NODE_IP##"
staticPodPath: ""
syncFrequency: 1m
fileCheckFrequency: 20s
httpCheckFrequency: 20s
staticPodURL: ""
port: 10250
readOnlyPort: 0
rotateCertificates: true
serverTLSBootstrap: true
authentication:
  anonymous:
    enabled: false
  webhook:
    enabled: true
  x509:
    clientCAFile: "/etc/kubernetes/cert/ca.pem"
authorization:
  mode: Webhook
registryPullQPS: 0
registryBurst: 20
eventRecordQPS: 0
eventBurst: 20
enableDebuggingHandlers: true
enableContentionProfiling: true
healthzPort: 10248
healthzBindAddress: "##NODE_IP##"
clusterDomain: "${CLUSTER_DNS_DOMAIN}"
clusterDNS:
  - "${CLUSTER_DNS_SVC_IP}"
nodeStatusUpdateFrequency: 10s
nodeStatusReportFrequency: 1m
imageMinimumGCAge: 2m
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 80
volumeStatsAggPeriod: 1m
kubeletCgroups: ""
systemCgroups: ""
cgroupRoot: ""
cgroupsPerQOS: true
cgroupDriver: cgroupfs
runtimeRequestTimeout: 10m
hairpinMode: promiscuous-bridge
maxPods: 220
podCIDR: "${CLUSTER_CIDR}"
podPidsLimit: -1
resolvConf: /etc/resolv.conf
maxOpenFiles: 1000000
kubeAPIQPS: 1000
kubeAPIBurst: 2000
serializeImagePulls: false
evictionHard:
  memory.available:  "100Mi"
  nodefs.available:  "10%"
  nodefs.inodesFree: "5%"
  imagefs.available: "15%"
evictionSoft: {}
enableControllerAttachDetach: true
failSwapOn: true
containerLogMaxSize: 20Mi
containerLogMaxFiles: 10
systemReserved: {}
kubeReserved: {}
systemReservedCgroup: ""
kubeReservedCgroup: ""
enforceNodeAllocatable: ["pods"]
EOF

for ip in ${NODE_IPS[@]}
do 
  echo ">>> ${ip}"
  sed -e "s/##NODE_IP##/${ip}/" kubelet-config.yaml.template > kubelet-config-${ip}.yaml
  scp kubelet-config-${ip}.yaml root@${ip}:/etc/kubernetes/kubelet-config.yaml
done

cat > kubelet.service.template <<EOF
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=${K8S_DIR}/kubelet
ExecStart=/opt/k8s/bin/kubelet \\
  --config=/etc/kubernetes/kubelet-config.yaml \\
  --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \\
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
  --cert-dir=/etc/kubernetes/cert \\
  --cni-conf-dir=/etc/cni/net.d \\
  --container-runtime=docker \\
  --container-runtime-endpoint=unix:///var/run/dockershim.sock \\
  --root-dir=${K8S_DIR}/kubelet \\
  --hostname-override=##NODE_NAME## \\
  --pod-infra-container-image=registry.cn-beijing.aliyuncs.com/k8s_images/pause-amd64:3.1 \\
  --image-pull-progress-deadline=15m \\
  --volume-plugin-dir=${K8S_DIR}/kubelet/kubelet-plugins/volume/exec/ \\
  --logtostderr=true \\
  --v=2
Restart=always
RestartSec=5
StartLimitInterval=0

[Install]
WantedBy=multi-user.target
EOF

for node_name in ${NODE_HOSTNAMES[@]}
do 
  echo ">>> ${node_name}"
  sed -e "s/##NODE_NAME##/${node_name}/" kubelet.service.template > kubelet-${node_name}.service
  scp kubelet-${node_name}.service root@${node_name}:/etc/systemd/system/kubelet.service
  ssh root@${node_name} "rm -rf /etc/systemd/system/multi-user.target.wants/kubelet.service"
done

echo "----------------Bootstrap Token Auth 和授予权限-------------"
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --group=system:bootstrappers

echo "----------------启动服务-------------"
for ip in ${NODE_IPS[@]}
do
  echo ">>> ${ip}"
  ssh root@${ip} "mkdir -p ${K8S_DIR}/kubelet/kubelet-plugins/volume/exec/"
  ssh root@${ip} "/usr/sbin/swapoff -a"
  ssh root@${ip} "systemctl daemon-reload && systemctl enable kubelet && systemctl restart kubelet"
done

echo "-------------检查服务运行状态----------------"
for ip in ${MASTER_IPS[@]}
do
  echo ">>> ${ip}"
  ssh root@${ip} "systemctl status kubelet | grep Active"
done

echo "----------------自动 approve CSR 请求-------------"
cat > csr-crb.yaml <<EOF
 # Approve all CSRs for the group "system:bootstrappers"
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: auto-approve-csrs-for-group
 subjects:
 - kind: Group
   name: system:bootstrappers
   apiGroup: rbac.authorization.k8s.io
 roleRef:
   kind: ClusterRole
   name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
   apiGroup: rbac.authorization.k8s.io
---
 # To let a node of the group "system:nodes" renew its own credentials
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: node-client-cert-renewal
 subjects:
 - kind: Group
   name: system:nodes
   apiGroup: rbac.authorization.k8s.io
 roleRef:
   kind: ClusterRole
   name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
   apiGroup: rbac.authorization.k8s.io
---
# A ClusterRole which instructs the CSR approver to approve a node requesting a
# serving cert matching its client cert.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: approve-node-server-renewal-csr
rules:
- apiGroups: ["certificates.k8s.io"]
  resources: ["certificatesigningrequests/selfnodeserver"]
  verbs: ["create"]
---
 # To let a node of the group "system:nodes" renew its own server credentials
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: node-server-cert-renewal
 subjects:
 - kind: Group
   name: system:nodes
   apiGroup: rbac.authorization.k8s.io
 roleRef:
   kind: ClusterRole
   name: approve-node-server-renewal-csr
   apiGroup: rbac.authorization.k8s.io
EOF
kubectl apply -f csr-crb.yaml

kubectl get csr
kubectl get nodes
netstat -lnpt | grep kubelet


echo "---------------等待一段时间,执行一下命令,检查---------"
echo ""
echo "kubectl get csr"
echo ""
echo ""
echo "kubectl get nodes"

echo "在节点加入集群的过程中,需要手动进行approving操作:"
echo "基于安全性考虑,CSR approving controllers 不会自动 approve kubelet server 证书签名请求,需要手动 approve:"
echo "eg:"
echo "kubectl certificate approve csr-xxxxxx"

echo ""
echo "netstat -lnpt|grep kubelet"

 

14

#!/bin/bash

#Run this on one node only

if [ $UID -ne 0 ]; then
  echo "This script must be run as root"
  exit 1
fi

source ./configrc

echo "==================安装kube-proxy================="
echo "------------------创建配置文件并分发------------"
cd /opt/k8s/work
cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "BeiJing",
      "L": "BeiJing",
      "O": "k8s",
      "OU": "4Paradigm"
    }
  ]
}
EOF

cfssl gencert -ca=/opt/k8s/work/ca.pem \
  -ca-key=/opt/k8s/work/ca-key.pem \
  -config=/opt/k8s/work/ca-config.json \
  -profile=kubernetes  kube-proxy-csr.json | cfssljson -bare kube-proxy
ls kube-proxy*

cd /opt/k8s/work
rm -rf kube-proxy.kubeconfig 
kubectl config set-cluster kubernetes \
  --certificate-authority=/opt/k8s/work/ca.pem \
  --embed-certs=true \
  --server=${KUBE_APISERVER} \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials kube-proxy \
  --client-certificate=kube-proxy.pem \
  --client-key=kube-proxy-key.pem \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes \
  --user=kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig

for node_name in ${NODE_HOSTNAMES[@]}
do
  echo ">>> ${node_name}"
  scp kube-proxy.kubeconfig root@${node_name}:/etc/kubernetes/
done

cat > kube-proxy-config.yaml.template <<EOF
kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
clientConnection:
  burst: 200
  kubeconfig: "/etc/kubernetes/kube-proxy.kubeconfig"
  qps: 100
bindAddress: ##NODE_IP##
healthzBindAddress: ##NODE_IP##:10256
metricsBindAddress: ##NODE_IP##:10249
enableProfiling: true
clusterCIDR: ${CLUSTER_CIDR}
hostnameOverride: ##NODE_NAME##
mode: "ipvs"
portRange: ""
kubeProxyIPTablesConfiguration:
  masqueradeAll: false
kubeProxyIPVSConfiguration:
  scheduler: rr
  excludeCIDRs: []
EOF

for idx in ${!NODE_IPS[@]}
do
  echo ">>> $_name"
  _name=${NODE_HOSTNAMES[$idx]}
  _ip=${NODE_IPS[$idx]} 
  sed -e "s/##NODE_NAME##/$_name/" -e "s/##NODE_IP##/$_ip/" kube-proxy-config.yaml.template > kube-proxy-config-$_name.yaml
  scp kube-proxy-config-$_name.yaml root@$_name:/etc/kubernetes/kube-proxy-config.yaml
done

echo "------------------创建服务并启动------------"
cat > kube-proxy.service <<EOF
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
WorkingDirectory=${K8S_DIR}/kube-proxy
ExecStart=/opt/k8s/bin/kube-proxy \\
  --config=/etc/kubernetes/kube-proxy-config.yaml \\
  --logtostderr=true \\
  --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF

for node_name in ${NODE_HOSTNAMES[@]}
do 
  echo ">>> ${node_name}"
  scp kube-proxy.service root@${node_name}:/etc/systemd/system/
done

for ip in ${NODE_IPS[@]}
do
  echo ">>> ${ip}"
  ssh root@${ip} "rm -rf ${K8S_DIR}/kube-proxy"
  ssh root@${ip} "mkdir -p ${K8S_DIR}/kube-proxy"
  ssh root@${ip} "modprobe ip_vs_rr"
  ssh root@${ip} "systemctl daemon-reload && systemctl enable kube-proxy && systemctl restart kube-proxy"
done


echo "------------------查看服务情况------------"
for ip in ${NODE_IPS[@]}
do
  echo ">>> ${ip}"
  ssh root@${ip} "systemctl status kube-proxy | grep Active"
done

echo "------------------查看监听端口------------"
netstat -lnpt | grep kube-prox

echo "------------------查看 ipvs 路由规则------------"
for ip in ${NODE_IPS[@]}
do
  echo ">>> ${ip}"
  ssh root@${ip} "/usr/sbin/ipvsadm -ln"
done
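kube-proxy also serves a health endpoint on port 10256 (healthzBindAddress above); a quick per-node probe (a sketch):

for ip in ${NODE_IPS[@]}
do
  echo ">>> ${ip}"
  ssh root@${ip} "curl -s http://${ip}:10256/healthz"
  echo ""
done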


 

15

#!/bin/bash

#Run this on one node only

if [ $UID -ne 0 ]; then
  echo "This script must be run as root"
  exit 1
fi

source ./configrc

echo "------------节点状态----------"
kubectl get nodes

cd /opt/k8s/work
cat > nginx-ds.yml <<EOF
apiVersion: v1
kind: Service
metadata:
  name: nginx-ds
  labels:
    app: nginx-ds
spec:
  type: NodePort
  selector:
    app: nginx-ds
  ports:
  - name: http
    port: 80
    targetPort: 80
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nginx-ds
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      app: nginx-ds
  template:
    metadata:
      labels:
        app: nginx-ds
    spec:
      containers:
      - name: my-nginx
        image: nginx:1.7.9
        ports:
        - containerPort: 80
EOF

echo ""
echo ""
echo "------------创建测试资源--------------"
echo ""
kubectl apply -f nginx-ds.yml


echo "等待5秒..."
sleep 5 

kubectl get pods  -o wide | grep nginx-ds

echo ""
echo ""
echo "------------检查各节点的 Pod IP 连通性--------------"
echo ""
for ip in ${NODE_IPS[@]}
do
  echo ">>> ${ip}"
  kubectl get pods  -o wide | grep nginx-ds | awk '{print $6}' | while read pod_ip
  do
    ssh ${ip} "ping -c 1 ${ip}"
  done
done

echo ""
echo ""
echo "------------检查服务 IP 和端口可达性--------------"
echo ""
kubectl get svc | grep nginx-ds
for ip in ${NODE_IPS[@]}
do
  echo ">>> ${ip}"
  kubectl get svc | grep nginx-ds | awk '{print $3}' | while read svc_ip
  do
    ssh ${ip} "curl -s ${svc_ip}"
  done
done


echo ""
echo ""
echo "------------检查服务的 NodePort 可达性--------------"
echo ""
for ip in ${NODE_IPS[@]}
do
  echo ">>> ${ip}"
  kubectl get svc | grep nginx-ds | awk '{print $5}' | awk -F '[:/]' '{print $2}' | while read svc_port
  do
    ssh ${ip} "curl -s ${ip}:${svc_port}"
  done
done

echo "------------删除测试资源--------------"
kubectl delete -f nginx-ds.yml

 

16

#!/bin/bash

#Run this on one node only


if [ $UID -ne 0 ]; then
  echo "This script must be run as root"
  exit 1
fi

source ./configrc

cd /opt/k8s/work
cat > coredns.yaml <<EOF
# __MACHINE_GENERATED_WARNING__

apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
      kubernetes.io/cluster-service: "true"
      addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
      addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes __PILLAR__DNS__DOMAIN__ in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
            ttl 30
        }
        prometheus :9153
        forward . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. So that Addon Manager does not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        beta.kubernetes.io/os: linux
      containers:
      - name: coredns
        image: registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.3.1
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: __PILLAR__DNS__SERVER__
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
EOF

sed -i -e "s/__PILLAR__DNS__DOMAIN__/${CLUSTER_DNS_DOMAIN}/" -e "s/__PILLAR__DNS__SERVER__/${CLUSTER_DNS_SVC_IP}/" coredns.yaml

echo "-------------- 部署coredns------------------"
kubectl apply -f coredns.yaml

echo "-------------- 检查 coredns 功能------------------"
kubectl get all -n kube-system


cat > my-nginx.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: my-nginx
spec:
  selector:
    matchLabels:
      run: my-nginx
  replicas: 2
  template:
    metadata:
      labels:
        run: my-nginx
    spec:
      containers:
      - name: my-nginx
        image: nginx:1.7.9
        ports:
        - containerPort: 80
EOF
kubectl apply -f my-nginx.yaml
kubectl expose deploy my-nginx
kubectl get services --all-namespaces |grep my-nginx

cat > dnsutils-ds.yml <<EOF
apiVersion: v1
kind: Service
metadata:
  name: dnsutils-ds
  labels:
    app: dnsutils-ds
spec:
  type: NodePort
  selector:
    app: dnsutils-ds
  ports:
  - name: http
    port: 80
    targetPort: 80
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: dnsutils-ds
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  selector:
    matchLabels:
      app: dnsutils-ds
  template:
    metadata:
      labels:
        app: dnsutils-ds
    spec:
      containers:
      - name: my-dnsutils
        image: tutum/dnsutils:latest
        command:
          - sleep
          - "3600"
        ports:
        - containerPort: 80
EOF
kubectl apply -f dnsutils-ds.yml
kubectl get pods -lapp=dnsutils-ds

echo "睡眠10秒..."
sleep 10

kubectl get pods -lapp=dnsutils-ds

kubectl get pods -lapp=dnsutils-ds | awk '{print $1}' | while read pod_name
do
  if [ ${pod_name} != "NAME" ];then
    kubectl exec ${pod_name} cat /etc/resolv.conf
    kubectl exec ${pod_name} nslookup kubernetes
    kubectl exec ${pod_name} nslookup my-nginx
    kubectl exec ${pod_name} nslookup www.baidu.com
    kubectl exec ${pod_name} nslookup kube-dns.kube-system.svc
    kubectl exec ${pod_name} nslookup kube-dns.kube-system.svc.cluster.local
    kubectl exec ${pod_name} nslookup kube-dns.kube-system.svc.cluster.local.
    kubectl exec ${pod_name} nslookup elasticsearch-logging
  fi
done

kubectl delete -f my-nginx.yaml
kubectl delete svc my-nginx
kubectl delete -f dnsutils-ds.yml

 

configrc

#!/usr/bin/bash

export NODE_HOSTNAMES=(node1 node2 node3 node4 node5)
export NODE_IPS=(192.168.66.11 192.168.66.12 192.168.66.13 192.168.66.14 192.168.66.15)

export MASTER_HOSTNAMES=(node1 node2 node3)
export MASTER_IPS=(192.168.66.11 192.168.66.12 192.168.66.13)

# address and port of the kube-apiserver reverse proxy (kube-nginx)
export KUBE_APISERVER=https://127.0.0.1:8443

# encryption key for generating the EncryptionConfig
export ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)

_etcd_endpoints=
_etcd_nodes=
for idx in ${!MASTER_IPS[@]}
do
  _name=${MASTER_HOSTNAMES[$idx]}
  _ip=${MASTER_IPS[$idx]} 

  if [ -z "$_etcd_nodes" ]; then 
    _etcd_nodes="$_name=https://$_ip:2380"
  else
    _etcd_nodes="$_etcd_nodes,$_name=https://$_ip:2380"
  fi

  if [ -z "$_etcd_endpoints" ]; then 
    _etcd_endpoints="https://$_ip:2379"
  else
    _etcd_endpoints="$_etcd_endpoints,https://$_ip:2379"
  fi
done

# list of etcd cluster client endpoints
export ETCD_ENDPOINTS=$_etcd_endpoints

# IPs and ports for etcd peer communication
export ETCD_NODES=$_etcd_nodes

# network interface used for inter-node traffic
export IFACE="ens33"

# etcd data directory
export ETCD_DATA_DIR="/data/k8s/etcd/data"

# etcd WAL directory; an SSD partition, or at least a partition separate from ETCD_DATA_DIR, is recommended
export ETCD_WAL_DIR="/data/k8s/etcd/wal"

# data directory for the k8s components
export K8S_DIR="/data/k8s/k8s"

# docker data directory
export DOCKER_DIR="/data/k8s/docker"

## The parameters below normally do not need to be changed

# token used for TLS bootstrapping; can be generated with: head -c 16 /dev/urandom | od -An -t x | tr -d ' '
BOOTSTRAP_TOKEN="41f7e4ba8b7be874fcff18bf5cf41a7c"

# Prefer currently unused network ranges for the Service and Pod networks

# Service network; unroutable before deployment, routable inside the cluster afterwards (guaranteed by kube-proxy)
SERVICE_CIDR="10.254.0.0/16"

# Pod network; a /16 range is recommended; unroutable before deployment, routable inside the cluster afterwards (guaranteed by flanneld)
CLUSTER_CIDR="10.244.0.0/16"

# NodePort range for Services
export NODE_PORT_RANGE="30000-32767"

# etcd key prefix for the flanneld network config
export FLANNEL_ETCD_PREFIX="/kubernetes/network"

# kubernetes Service IP (usually the first IP in SERVICE_CIDR)
export CLUSTER_KUBERNETES_SVC_IP="10.254.0.1"

# cluster DNS Service IP (pre-allocated from SERVICE_CIDR)
export CLUSTER_DNS_SVC_IP="10.254.0.2"

# cluster DNS domain (no trailing dot)
export CLUSTER_DNS_DOMAIN="cluster.local"

# add the binary directory /opt/k8s/bin to PATH
export PATH=/opt/k8s/bin:$PATH

 
