前言
此文是之前系列文章的命令汇总参考,主要记录各步骤所用命令,方便以后直接快速搭建。
准备
服务器 | ip | 系统 | 角色 |
---|---|---|---|
master1 | 192.168.31.100 | centos7.6 | k8s-master节点1,ceph-node,ceph-osd,ceph-mds,ceph-mgr |
node1 | 192.168.31.101 | centos7.6 | k8s-node节点1,ceph-node,ceph-osd,ceph-mds |
node2 | 192.168.31.102 | centos7.6 | k8s-node节点2,ceph-node,ceph-osd,ceph-mds |
服务器软件准备
- 保证master1,node1,node2互通
- 配置hosts,hostname
# Set each node's hostname — run only the matching line on each machine
hostnamectl set-hostname master1   # run on master1 only
hostnamectl set-hostname node1     # run on node1 only
hostnamectl set-hostname node2     # run on node2 only
# Write /etc/hosts on every machine.
# NOTE: keep the localhost entries — overwriting /etc/hosts with only the
# cluster names would break anything that resolves "localhost".
cat >/etc/hosts <<EOF
127.0.0.1   localhost localhost.localdomain
::1         localhost localhost.localdomain
192.168.31.100 master1
192.168.31.101 node1
192.168.31.102 node2
EOF
- 关闭防火墙,selinux,swap
# Stop and disable the firewall (k8s/ceph need many ports open between nodes)
systemctl stop firewalld
systemctl disable firewalld
# Put SELinux into permissive mode now, and disable it permanently on reboot
setenforce 0
sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
# Turn swap off now and comment out swap lines in fstab so it stays off
# (kubelet refuses to start with swap enabled by default)
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab
- 配置内核参数,iptables
# Kernel parameters required by kubernetes.
# The net.bridge.bridge-nf-call-* keys only exist once the br_netfilter
# module is loaded; load it now and persist it across reboots so
# `sysctl --system` does not fail with "No such file or directory".
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
cat << EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
EOF
# Kernel parameters required by ceph
cat > /etc/sysctl.d/ceph.conf <<EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# Apply all sysctl configuration files
sysctl --system
第一步-配置yum国内源
root@ALL:
- 安装必要工具
# Install base tooling needed to fetch and manage the repo files below
yum -y install wget epel-release yum-utils
- 修改本机源
# Switch CentOS base + EPEL to CN mirrors; back up the stock repo files first.
# -p makes the backup step idempotent if this script is re-run.
mkdir -p /etc/yum.repos.d/bak && mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/bak
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.cloud.tencent.com/repo/centos7_base.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.cloud.tencent.com/repo/epel-7.repo
# Ceph (nautilus) repo from the Aliyun mirror
cat << EOF >/etc/yum.repos.d/ceph.repo
[ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch/
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=http://mirrors.aliyun.com/ceph/keys/release.asc
[ceph-x86_64]
name=Ceph packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/x86_64/
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=http://mirrors.aliyun.com/ceph/keys/release.asc
EOF
# Kubernetes repo from the Aliyun mirror
cat << EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Docker CE repo from the Aliyun mirror
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
# Rebuild the yum metadata cache against the new repos
yum clean all && yum makecache
第二步-安装docker,docker-compose并设置相关参数
# Install docker engine and docker-compose
yum -y install docker-compose docker-ce
systemctl start docker
systemctl enable docker
# Use the systemd cgroup driver so docker matches kubelet's default.
# NOTE(review): the original note also mentioned allowing a non-HTTPS harbor
# registry, but no "insecure-registries" key is set below — add one here if
# a private harbor registry over plain HTTP is actually needed.
cat << EOF > /etc/docker/daemon.json
{
"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl restart docker
# Verify the cgroup driver now reports "systemd"
docker info | grep Cgroup
第三步-在master1节点上搭建haproxy+keepalived框架
root@master1:
- 安装相关插件
# haproxy + keepalived provide the apiserver VIP; net-tools/nmap-ncat for checks
yum install -y haproxy keepalived net-tools nmap-ncat
- 配置haproxy
# Append (sed '$a') to the stock haproxy.cfg: a TCP listener on :10443 that
# load-balances to the kube-apiserver(s), plus an HTTP stats page on :8099.
# The trailing backslashes are sed line continuations; the appended text
# (including its '#' lines) becomes haproxy.cfg content verbatim.
sed -i '$a\
#---------------------------------------------------------------------\
# kube-api-server-listen\
#---------------------------------------------------------------------\
listen kube-api-lb\
bind 0.0.0.0:10443\
mode tcp\
balance roundrobin\
server master1 192.168.31.100:6443 weight 1 maxconn 10000 check inter 10s\
\
#---------------------------------------------------------------------\
# kube-api-haproxy-stats\
#---------------------------------------------------------------------\
listen admin_stats\
bind 0.0.0.0:8099\
mode http\
option httplog\
maxconn 10\
stats refresh 30s\
stats uri /stats' /etc/haproxy/haproxy.cfg
- 配置keepalived
mkdir -p /etc/keepalived/scripts/
# Health-check script: if haproxy died, try to restart it; if it still is not
# running after 3s, stop keepalived so the VIP fails over to a peer.
cat > /etc/keepalived/scripts/haproxy_check.sh << \EOF
#!/bin/bash
if [ "$(ps -C haproxy --no-header | wc -l)" -eq 0 ]
then
    systemctl start haproxy
    sleep 3
    if [ "$(ps -C haproxy --no-header | wc -l)" -eq 0 ]
    then
        systemctl stop keepalived
    fi
fi
EOF
# Gratuitous-ARP script run when this node becomes MASTER, so the switch
# learns the VIP's new MAC. VIP matches the virtual_ipaddress used in
# keepalived.conf (192.168.31.150); the interface must match the
# vrrp_instance interface (ens33 — the original used eth0, which does not
# exist on these hosts).
cat > /etc/keepalived/scripts/notifi_master.sh << \EOF
#!/bin/bash
VIP=192.168.31.150
GATEWAY=192.168.31.1   # TODO: set to your actual gateway address
/sbin/arping -I ens33 -c 5 -s $VIP $GATEWAY &>/dev/null
EOF
chmod +x /etc/keepalived/scripts/haproxy_check.sh /etc/keepalived/scripts/notifi_master.sh
# Keep a backup of the distribution keepalived.conf before replacing it
cp /etc/keepalived/keepalived.conf /etc/keepalived/keepalived.conf.backup
- 这里把master1节点作为keepalived主(MASTER)节点;本文只有一个master,unicast_peer 中填写的是 node1/node2。如以后扩展出 master2 等节点,可按类似配置(state BACKUP、较低 priority)将其作为备用节点
在master1节点上配置主keepalived.conf
# MASTER keepalived config for master1: VRRP over unicast to node1/node2,
# VIP 192.168.31.150 on ens33, haproxy health-check script every 5s,
# gratuitous-ARP notify script on MASTER transition.
cat << EOF > /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id Haproxy-Master
script_user root
enable_script_security
vrrp_skip_check_adv_addr
vrrp_iptables
vrrp_garp_interval 0
vrrp_gna_interval 0
# vrrp_strict
}
vrrp_script chk_haproxy
{
script "/etc/keepalived/scripts/haproxy_check.sh"
interval 5
fall 2
}
vrrp_instance haproxy {
state MASTER
interface ens33
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 5e97s45a2
}
unicast_src_ip 192.168.31.100
unicast_peer {
192.168.31.101
192.168.31.102
}
track_script {
chk_haproxy
}
virtual_ipaddress {
192.168.31.150
}
notify_master "/etc/keepalived/scripts/notifi_master.sh"
}
EOF
- 启动keepalived
# NetworkManager can interfere with keepalived-managed addresses.
# NOTE(review): only stopped here, not disabled — it will come back after a
# reboot; confirm whether `systemctl disable NetworkManager` is intended.
systemctl stop NetworkManager
systemctl start keepalived
systemctl enable keepalived
# haproxy (started via the check script / keepalived) should be listening on :10443
netstat -ntplu|grep 10443
第四步-部署k8s集群
root@ALL:
- 安装kubelet,kubectl,kubeadm
# Pin kubelet/kubeadm/kubectl to the same version as the control plane (1.17.2)
yum install -y kubelet-1.17.2 kubeadm-1.17.2 kubectl-1.17.2
# Enable only — kubeadm will start/configure kubelet during init/join
systemctl enable kubelet
root@master1:
- 使用kubeadm部署master1
pod-network-cidr为flanneld网络默认地址
# Initialize the first control-plane node.
# --control-plane-endpoint points at the keepalived VIP + haproxy port (10443)
# --pod-network-cidr must match flannel's default network (10.244.0.0/16)
# --upload-certs lets additional control-plane nodes join without manual cert copy
kubeadm init --kubernetes-version=1.17.2 \
--apiserver-advertise-address=192.168.31.100 \
--image-repository registry.aliyuncs.com/google_containers \
--service-cidr=10.1.0.0/16 \
--control-plane-endpoint "192.168.31.150:10443" \
--upload-certs \
--pod-network-cidr=10.244.0.0/16
- 记录加入master节点的token和加入node节点的token
- 配置kubectl工具
# Copy the admin kubeconfig into the current user's home so kubectl works
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Sanity checks: node list and component statuses
kubectl get nodes
kubectl get cs
- 部署flanneld网络
创建文件flanneld.yaml
# Create a directory to hold the flannel manifest
mkdir -p yaml/flanneld
# 写入文件
cat << EOF > yaml/flanneld/flanneld.yaml
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
# SELinux is unused in CaaSP
rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- <