1. Host initialization
cat >> /etc/hosts <<'EOF'
192.168.6.63 k8s1
192.168.6.64 k8s2
192.168.6.65 k8s3
192.168.6.66 k8s4
192.168.6.67 k8s5
192.168.6.68 k8s6
192.168.6.80 master-lb
EOF
swapoff -a
sysctl -w vm.swappiness=0
sed -ri '/^[^#]*swap/s@^@#@' /etc/fstab
echo vm.swappiness=0 >>/etc/sysctl.conf
sysctl -p
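An optional quick check that swap is really off:
swapon --show   # should print nothing
free -h         # the Swap line should read 0B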
cat <<EOF | tee /etc/modules-load.d/k8s.conf
br_netfilter
EOF
cat <<EOF | tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system
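Optionally verify that the module and kernel parameters took effect:
lsmod | grep br_netfilter
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward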
yum clean all
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
sed -i '/aliyuncs/d' /etc/yum.repos.d/CentOS-Base.repo
sed -i '/aliyuncs/d' /etc/yum.repos.d/epel.repo
yum install https://www.elrepo.org/elrepo-release-7.el7.elrepo.noarch.rpm -y
yum --enablerepo=elrepo-kernel install kernel-ml -y
grub2-set-default 0
grub2-mkconfig -o /boot/grub2/grub.cfg
reboot
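After the reboot, confirm the host is running the new kernel-ml kernel:
uname -r   # should show 5.x or newer, not the stock 3.10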
curl -o /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/centos/docker-ce.repo
sed -i 's+download.docker.com+mirrors.tuna.tsinghua.edu.cn/docker-ce+' /etc/yum.repos.d/docker-ce.repo
yum list docker-ce --showduplicates
yum -y install docker-ce-19.03.15 docker-ce-cli-19.03.15
# (or, to install the latest version instead of the pinned one: yum -y install docker-ce)
yum -y install bash-completion
source /usr/share/bash-completion/bash_completion
mkdir -pv /etc/docker && cat <<EOF | sudo tee /etc/docker/daemon.json
{
  "registry-mirrors": ["https://6wdq83of.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl daemon-reload
systemctl enable --now docker
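Confirm Docker is up and using the systemd cgroup driver, matching the kubelet default:
docker info | grep -i 'cgroup driver'   # expect: Cgroup Driver: systemd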
cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
EOF
yum list kubeadm --showduplicates | sort -r
yum -y install kubeadm-1.22.15-0 kubelet-1.22.15-0 kubectl-1.22.15-0
systemctl enable --now kubelet
systemctl status kubelet
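Until kubeadm init/join writes its configuration, kubelet stays in a restart loop; that is expected at this point. Check that the installed versions match:
kubeadm version -o short
kubelet --version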
2. HA component deployment (on the master nodes)
mkdir -p /server/tools
cd /server/tools
wget http://nginx.org/download/nginx-1.22.1.tar.gz
yum install pcre-devel openssl-devel -y
tar xf nginx-1.22.1.tar.gz
cd nginx-1.22.1
./configure --prefix=/usr/local/nginx \
--with-pcre \
--with-http_ssl_module \
--with-http_stub_status_module \
--with-stream \
--with-http_gzip_static_module
make -j2 && make install
cat >/usr/local/nginx/conf/nginx.conf <<'EOF'
user nginx;
worker_processes auto;
events {
    worker_connections 10240;
}
error_log /usr/local/nginx/logs/error.log info;
stream {
    upstream kube-apiservers {
        hash $remote_addr consistent;
        server k8s1:6443 weight=5 max_fails=1 fail_timeout=3s;
        server k8s2:6443 weight=5 max_fails=1 fail_timeout=3s;
        server k8s3:6443 weight=5 max_fails=1 fail_timeout=3s;
    }
    server {
        listen 8443 reuseport;
        proxy_connect_timeout 3s;
        proxy_timeout 3000s;
        proxy_pass kube-apiservers;
    }
}
EOF
useradd nginx -s /sbin/nologin -M
cat >/usr/lib/systemd/system/nginx.service <<'EOF'
[Unit]
Description=The nginx HTTP and reverse proxy server
After=network-online.target remote-fs.target nss-lookup.target
Wants=network-online.target
[Service]
Type=forking
ExecStartPre=/usr/local/nginx/sbin/nginx -t -c /usr/local/nginx/conf/nginx.conf
ExecStart=/usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/nginx.conf
ExecReload=/usr/local/nginx/sbin/nginx -s reload
ExecStop=/usr/local/nginx/sbin/nginx -s stop
TimeoutStopSec=5
Restart=on-failure
RestartSec=42s
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl start nginx
systemctl enable nginx
netstat -lntup|grep nginx
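Once the apiservers are up (section 5), the proxy can be tested end to end; -k is needed because the serving certificate is issued for the apiservers, not the LB address:
curl -k https://127.0.0.1:8443/healthz   # should print ok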
yum install keepalived -y
cp /etc/keepalived/keepalived.conf{,.bak}
cat >/etc/keepalived/keepalived.conf <<'EOF'
! Configuration File for keepalived
global_defs {
    # set to this machine's own IP (different on each node)
    router_id 192.168.6.63
    enable_script_security
}
vrrp_script check_k8s {
    script "/server/scripts/check-k8s.sh"
    interval 2
    weight 2
    fall 3
    rise 2
}
vrrp_instance k8s-master {
    # set to BACKUP on the other two machines
    state MASTER
    # your own NIC name (e.g. eth0 if that is what the host uses)
    interface ens192
    virtual_router_id 51
    # the other two machines must use distinct priorities below 100
    priority 100
    advert_int 1
    # this node's own IP, used as the VRRP multicast source; different on each machine
    mcast_src_ip 192.168.6.63
    authentication {
        auth_type PASS
        auth_pass k8s-master-ha
    }
    track_script {
        check_k8s
    }
    virtual_ipaddress {
        192.168.6.80/24
    }
}
EOF
mkdir -p /server/scripts/
cat >/server/scripts/check-k8s.sh <<'EOF'
#!/bin/bash
# Probe kube-apiserver 5 times, one second apart.
# If it is ever missing, record pid=0 and bail out early.
function check_kube-apiserver(){
    for i in `seq 5`
    do
        pid=`pgrep kube-apiserver`
        if [[ -n $pid ]]
        then
            sleep 1
            continue
        else
            pid=0
            break
        fi
    done
}
check_kube-apiserver
# No local apiserver: stop keepalived so the VIP fails over to another master.
if [[ $pid -eq 0 ]]
then
    systemctl stop keepalived
    exit 1
else
    exit 0
fi
EOF
chmod +x /server/scripts/check-k8s.sh
systemctl start keepalived
systemctl enable keepalived
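Verify the VIP landed on the MASTER node (assuming the interface is ens192, as configured above):
ip addr show ens192 | grep 192.168.6.80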
3. Initialize the master node
mkdir -p /data/k8s/master
cd /data/k8s/master
kubeadm config print init-defaults > kubeadm-init.yml
# edit the generated defaults; the customized kubeadm-init.yml:
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.6.63
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  imagePullPolicy: IfNotPresent
  name: k8s1
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: 1.22.0
controlPlaneEndpoint: 192.168.6.80:8443
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16
scheduler: {}
# validate the config with a dry run first
kubeadm init --config kubeadm-init.yml --dry-run
4. Pre-pull the images
kubeadm config images list --config kubeadm-init.yml
registry.aliyuncs.com/google_containers/kube-apiserver:v1.22.0
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.22.0
registry.aliyuncs.com/google_containers/kube-scheduler:v1.22.0
registry.aliyuncs.com/google_containers/kube-proxy:v1.22.0
registry.aliyuncs.com/google_containers/pause:3.5
registry.aliyuncs.com/google_containers/etcd:3.5.0-0
registry.aliyuncs.com/google_containers/coredns:v1.8.4
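Then actually pre-pull them on each master so init/join runs faster:
kubeadm config images pull --config kubeadm-init.yml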
5. Initialize the cluster from the kubeadm config file
kubeadm init --config kubeadm-init.yml --upload-certs
systemctl enable --now kubelet
systemctl status kubelet
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl -n kube-system get cm kubeadm-config -o yaml
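A quick sanity check of the new control plane:
kubectl get nodes
kubectl -n kube-system get pods -o wide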
# init prints the join command for worker nodes, e.g.:
kubeadm join 192.168.6.80:8443 --token rklw7e.jg7gojsohytm856r --discovery-token-ca-cert-hash sha256:874d0e3c953d00474c3739c38eb4b27191696607cab2a06a7efcdb7aeb065050
# if initialization fails, clean up before retrying:
kubeadm reset
rm -fr ~/.kube/ /etc/kubernetes/* /var/lib/etcd/*
6. Join the other master nodes to the cluster
kubeadm token list
kubeadm token create --print-join-command --ttl 0
kubeadm join 192.168.6.80:8443 --token rklw7e.jg7gojsohytm856r --discovery-token-ca-cert-hash sha256:874d0e3c953d00474c3739c38eb4b27191696607cab2a06a7efcdb7aeb065050
kubectl -n kube-system get secret
kubectl -n kube-system get secret bootstrap-token-rklw7e -o yaml
# re-upload the control-plane certificates; the command prints the certificate key:
kubeadm init phase upload-certs --upload-certs
39221397d694956cfd73e0102421a8877fdb8a46c25cadebc07738cc72aef6f2
kubeadm join 192.168.6.80:8443 --token rklw7e.jg7gojsohytm856r --discovery-token-ca-cert-hash sha256:874d0e3c953d00474c3739c38eb4b27191696607cab2a06a7efcdb7aeb065050 \
--control-plane --certificate-key 39221397d694956cfd73e0102421a8877fdb8a46c25cadebc07738cc72aef6f2
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
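From the newly joined master, confirm all control-plane members are registered:
kubectl get nodes -o wide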
# generate the 16-character secret part for a custom token
date +%s | md5sum | cut -c 5-20
kubeadm token create diypas.69ad50b9bfcc3a52 --ttl 0 --print-join-command
kubeadm token delete diypas.69ad50b9bfcc3a52
7. Install the network plugin (flannel)
cat > flannel.yml <<'EOF'
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  seLinux:
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      imagePullSecrets:
      - name: harbor-key
      initContainers:
      - name: install-cni-plugin
        imagePullPolicy: Always
        image: rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
        command:
        - cp
        args:
        - -f
        - /flannel
        - /opt/cni/bin/flannel
        volumeMounts:
        - name: cni-plugin
          mountPath: /opt/cni/bin
      - name: install-cni
        imagePullPolicy: Always
        image: rancher/mirrored-flannelcni-flannel:v0.18.1
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        imagePullPolicy: Always
        image: rancher/mirrored-flannelcni-flannel:v0.18.1
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: EVENT_QUEUE_DEPTH
          value: "5000"
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
        - name: xtables-lock
          mountPath: /run/xtables.lock
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni-plugin
        hostPath:
          path: /opt/cni/bin
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
      - name: xtables-lock
        hostPath:
          path: /run/xtables.lock
          type: FileOrCreate
EOF
kubectl apply -f flannel.yml
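Watch the DaemonSet roll out; nodes turn Ready once the CNI config is in place:
kubectl -n kube-system get pods -l app=flannel -o wide
kubectl get nodes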
8. Switch kube-proxy to IPVS mode
yum -y install conntrack-tools ipvsadm
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
# kernels >= 4.19 (including the kernel-ml installed above) merged
# nf_conntrack_ipv4 into nf_conntrack
modprobe -- nf_conntrack
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack
# set mode: "ipvs" in the kube-proxy ConfigMap
kubectl -n kube-system edit cm kube-proxy
kubectl -n kube-system describe cm kube-proxy | grep mode
# restart all kube-proxy pods so they pick up the new mode
kubectl get pods -A | grep kube-proxy | awk '{print $2}' | xargs kubectl -n kube-system delete pods
kubectl -n kube-system logs -f kube-proxy-xxx
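Confirm IPVS rules are actually being programmed (ipvsadm was installed above); kube-proxy also reports its mode on the metrics port:
ipvsadm -Ln
curl 127.0.0.1:10249/proxyMode   # should print ipvs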
9. Dashboard (Kuboard)
kubectl apply -f https://addons.kuboard.cn/kuboard/kuboard-v3.yaml
kubectl get -n kuboard po
http://192.168.6.63:30080/sso/auth/default?req=rxk5odwrmhw53tydq6gzh4mpv
Username: admin
Password: Kuboard123
# to uninstall Kuboard:
kubectl delete -f https://addons.kuboard.cn/kuboard/kuboard-v3.yaml