Ⅰ、Prepare ubuntu
1、apt
# Switch apt to the USTC mirror.
# Back up the stock list, strip comments/blank lines, then rewrite the mirror hosts.
# BUG FIX: the original wrote the filtered list to ./sources.list in the current
# directory, so the subsequent sed edits targeted a file that no longer existed.
mv /etc/apt/sources.list /etc/apt/sources.list.bak
grep -v "#" /etc/apt/sources.list.bak | grep -v "^$" > /etc/apt/sources.list
sed -i 's/archive.ubuntu.com/mirrors.ustc.edu.cn/g' /etc/apt/sources.list
sed -i 's/security.ubuntu.com/mirrors.ustc.edu.cn/g' /etc/apt/sources.list
apt -y update && apt -y upgrade
2、timedatectl
# Use China Standard Time so timestamps agree across all cluster nodes
timedatectl set-timezone Asia/Shanghai
3、bash-completion
# Enable bash-completion by uncommenting its block in the shell config.
# NOTE(review): the original command was truncated ("sed -i 97 ,99s/") —
# verify the target file and the 97-99 line range before running.
sed -i '97,99s/^#//' /etc/bash.bashrc
4、ssh
# Permit root login over SSH and set a root password non-interactively.
echo "PermitRootLogin yes" >> /etc/ssh/sshd_config
# BUG FIX: the original heredoc was empty, so no password was ever set.
# passwd reads the new password twice from stdin — replace CHANGE_ME first.
passwd root << EOF
CHANGE_ME
CHANGE_ME
EOF
systemctl reload ssh
5、network
# Rename the primary interface to eth0 via netplan + kernel cmdline.
# Grab the interface name (4th line, first field of the netplan config,
# e.g. "ens33:" — the YAML key keeps its trailing colon).
net=$(awk 'NR==4{print $1}' /etc/netplan/00-installer-config.yaml)
# NOTE(review): assumes ${net} carries the trailing colon — confirm against the file
sed -i "s/${net}/eth0:/g" /etc/netplan/00-installer-config.yaml
# TYPO FIX: the file is /etc/default/grub and the command is update-grub (not "gurb")
sed -i 's/GRUB_CMDLINE_LINUX=""/GRUB_CMDLINE_LINUX="net.ifnames=0 biosdevname=0"/g' /etc/default/grub
update-grub
reboot
Ⅱ、prepare for kubernetes
1、hosts
# On each control-plane node (replace 0x with 01/02/03):
hostnamectl set-hostname k8s-master0x
# On each worker node (replace 0x with 01/02):
hostnamectl set-hostname k8s-node0x
# Add the cluster host entries (listed below) to /etc/hosts on every node:
vim /etc/hosts
10.0.0.5 k8s-master01
10.0.0.6 k8s-master02
10.0.0.7 k8s-master03
10.0.0.8 k8s-node01
10.0.0.9 k8s-node02
10.0.0.10 k8s-bl-master
2、ssh-keygen
# Generate an RSA key pair (accept the interactive defaults), then push the
# public key to every node so later steps can run over passwordless SSH.
ssh-keygen -t rsa
# FIX: use an absolute key path (the original ".ssh/id_rsa.pub" only worked
# from root's home directory) and quote the loop variable.
for i in k8s-master01 k8s-master02 k8s-master03 k8s-node01 k8s-node02; do ssh-copy-id -i ~/.ssh/id_rsa.pub "$i"; done
3、swap
# Disable swap now (kubelet refuses to run with swap enabled)...
swapoff -a
# ...and comment out every fstab line containing " swap " so it stays off after reboot
sed -i '/ swap / s/^\(.*\)$/#\1 /g' /etc/fstab
4、install ipvs
# Install IPVS userspace tools and have systemd load the IPVS/conntrack
# kernel modules on every boot (needed for kube-proxy IPVS mode).
apt -y install ipvsadm ipset sysstat conntrack libseccomp2 libseccomp-dev
cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
EOF
systemctl restart systemd-modules-load.service
# FIX: check for nf_conntrack (the module loaded above); nf_conntrack_ipv4
# no longer exists on the kernels Ubuntu 20.04+ ships, so the original grep
# term could never match.
lsmod | grep -e ip_vs -e nf_conntrack
5、install docker
# Install Docker CE from the USTC mirror and point kubelet-compatible
# cgroup settings at systemd.
apt -y install apt-transport-https ca-certificates curl software-properties-common
curl -fsSL https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu/gpg | sudo apt-key add -
# (removed: the prerequisite install line was duplicated verbatim in the original)
add-apt-repository "deb [arch=amd64] https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu $(lsb_release -cs) stable"
apt -y update && apt -y install docker-ce docker-ce-cli containerd.io
# kubeadm requires the systemd cgroup driver rather than Docker's cgroupfs default
cat > /etc/docker/daemon.json << EOF
{
"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl daemon-reload
systemctl restart docker
# FIX: only docker exists at this point; kubelet is installed in section V —
# enable it there, after "apt install kubelet kubeadm kubectl".
systemctl enable docker
reboot
Ⅲ、haproxy config
apt -y update
apt -y install haproxy
cd /etc/haproxy/
# Keep the stock config as a backup, then write a TCP load-balancer config
# that fronts the three apiservers: clients hit the VIP on 6444, haproxy
# round-robins to each master's apiserver on 6443 with health checks.
mv /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak
cat > haproxy.cfg << EOF
global
chroot /var/lib/haproxy
user haproxy
group haproxy
daemon
maxconn 4000
ulimit-n 16384
log 127.0.0.1 local0 err
stats timeout 30s
defaults
log global
mode http
option httplog
timeout connect 5000
timeout client 50000
timeout server 50000
frontend kubernetes-apiserver
mode tcp
bind *:6444
option tcplog
default_backend kubernetes-apiserver
backend kubernetes-apiserver
mode tcp
option tcplog
option tcp-check
balance roundrobin
server k8s-master01 10.0.0.5:6443 check
server k8s-master02 10.0.0.6:6443 check
server k8s-master03 10.0.0.7:6443 check
EOF
Ⅳ、keepalived config
apt -y update
apt -y install keepalived
# MASTER-node config: owns VIP 10.0.0.10 and drops priority by 5 whenever the
# haproxy health-check script below fails twice in a row, letting a BACKUP
# node take over. On the second LB node, set "state BACKUP" and a lower priority.
cat > /etc/keepalived/keepalived.conf << EOF
! Configuration File for keepalived
global_defs {
router_id LVS_DEVEL
}
vrrp_script check_haproxy {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 51
nopreempt
priority 101
advert_int 1
authentication {
auth_type PASS
auth_pass K8SHA_KA_AUTH
}
virtual_ipaddress {
10.0.0.10
}
track_script {
check_haproxy
}
}
EOF
# Write the haproxy health-check script used by keepalived's vrrp_script:
# probe for a haproxy process 3 times; if it never appears, stop keepalived
# so the VIP fails over.
# BUG FIX: the heredoc delimiter must be quoted ('EOF'). Unquoted, the shell
# expanded $(seq 1 3), $(pgrep haproxy), $check_code and $err while WRITING
# the file, producing a broken script instead of the literal text below.
cat > /etc/keepalived/check_apiserver.sh << 'EOF'
#!/bin/bash
err=0
for k in $( seq 1 3 )
do
check_code=$( pgrep haproxy)
if [[ $check_code == "" ]]; then
err=$( expr $err + 1 )
sleep 1
continue
else
err=0
break
fi
done
if [[ $err != "0" ]]; then
echo "systemctl stop keepalived"
/usr/bin/systemctl stop keepalived
exit 1
else
exit 0
fi
EOF
# keepalived runs the script directly, so it must be executable
chmod +x /etc/keepalived/check_apiserver.sh
Ⅴ、install kubernetes
1、install kube{adm,ctl,let}
# Import the Kubernetes apt signing key (Aliyun hosts a mirror of it).
# NOTE(review): apt-key is deprecated on newer Ubuntu releases, and the key
# comes from the Aliyun mirror while the repo below is USTC — verify they match.
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -
cat << EOF > /etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.ustc.edu.cn/kubernetes/apt/ kubernetes-xenial main
EOF
apt -y update && apt -y install kubelet kubeadm kubectl
2、kubeadm init
# Initialize the first control plane. The endpoint is the keepalived VIP on the
# haproxy frontend port 6444; certs are uploaded so other masters can join.
# (De-garbled: the original had spaces injected into the IPs, CIDRs and version.)
kubeadm init --apiserver-advertise-address=10.0.0.5 --control-plane-endpoint=10.0.0.10:6444 --pod-network-cidr=10.244.0.0/16 --service-cidr=10.96.0.0/16 --upload-certs --image-repository registry.aliyuncs.com/google_containers --kubernetes-version=1.22.0
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of the control-plane node running the following command on each as root:
kubeadm join 10.0.0.10:6444 --token uxutjv.9cfkxkxvddltasxj \
--discovery-token-ca-cert-hash sha256:405ccc9558e5b519d7e233b8bcc10ad4a37de275c7a434efec5d09e7b9732c2c \
--control-plane --certificate-key a4385e9ee7e83013e728f400484a7e1c64a0092a80b120268d1b401bb1c517d7
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 10.0.0.10:6444 --token uxutjv.9cfkxkxvddltasxj \
--discovery-token-ca-cert-hash sha256:405ccc9558e5b519d7e233b8bcc10ad4a37de275c7a434efec5d09e7b9732c2c
kubeadm join 10.0.0.10:6444 --token uxutjv.9cfkxkxvddltasxj \
--discovery-token-ca-cert-hash sha256:405ccc9558e5b519d7e233b8bcc10ad4a37de275c7a434efec5d09e7b9732c2c \
--control-plane --certificate-key a4385e9ee7e83013e728f400484a7e1c64a0092a80b120268d1b401bb1c517d7
# Set up kubectl access for the current user and make KUBECONFIG persistent.
# (De-garbled: the original had spaces inside $HOME/.kube and $(id -u):$(id -g).)
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> /etc/profile
source /etc/profile
Ⅵ、install calico
# Install the Calico CNI plugin.
# NOTE(review): stock calico.yaml assumes its default pod CIDR — confirm it
# matches the --pod-network-cidr passed to kubeadm init (10.244.0.0/16).
kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml
Ⅶ、install dashboarb
1、token
# Deploy the Kubernetes dashboard (v2.3.1) and grant its service account
# cluster-admin. (De-garbled: the original had spaces after every "=".)
# NOTE(review): cluster-admin for the dashboard is fine for a lab, far too
# broad for production.
kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.3.1/aio/deploy/recommended.yaml
kubectl create clusterrolebinding dashboard-cluster-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:kubernetes-dashboard
---
# Change the Service's "type:" from ClusterIP to NodePort (resulting YAML below)
kubectl edit svc kubernetes-dashboard -n kubernetes-dashboard
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: "2021-11-02T15:26:24Z"
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
resourceVersion: "7527"
uid: 9d20b2d1-537d-49fd-8d1a-77b00d1bc220
spec:
clusterIP: 10.96.218.55
clusterIPs:
- 10.96.218.55
externalTrafficPolicy: Cluster
internalTrafficPolicy: Cluster
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- nodePort: 30254
port: 443
protocol: TCP
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
sessionAffinity: None
type: NodePort
status:
loadBalancer: { }
---
# Extract the dashboard login token.
# FIX: the token secret name carries a random suffix (the original command
# hard-coded an incomplete "kubernetes-dashboard-token-"); look it up instead.
# NOTE(review): NR==13 assumes the token is on line 13 of the describe output — verify.
kubectl describe secret "$(kubectl get secret -n kubernetes-dashboard | awk '/kubernetes-dashboard-token/{print $1}')" -n kubernetes-dashboard | awk 'NR==13{print $2}' > token
---
cat token
eyJhbGciOiJSUzI1NiIsImtpZCI6InFEd2phNURsWjQwcU9Ld3Iyd3VDd19MWVRINXVSOUVwVFJFSXNCR0lIbVEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi13aDVqdyIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6ImFmMzcwNzJlLTJhOTUtNDA2ZS1hYmE4LTJkOTY1NzE0NTAzMSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.Su7QGSlUZE_j4eMuSKvy8k6GdNQL2sttBnJfw6POyYDxdDJ2lvxwJtrrvZkPGZFWHC3awKO7rqQKdAGghwcUbmTpMlL-8t-VilqFbB-mcST8w0H9SF8LqEiGkJ9h7DWxNltSH8FaXkHMlX51CgoT8TX5C28kY84Zr69-N7F9DnNYK4V1xJPoJxCzYJHeSA_z0rYHFNfidk3BY5mV_W2qzrHg8OuyI50scoaPkPLY9-Hms4OhxOcH8vI0zIq9fWTXviPimf_EORg7w5fy4AoVD-RiGwkhpbcgHnHUz_z7WDbVe5Av8IhrTjZ-1VCrRaQrH8TOUOmQiztEpMGDjdvkgw
2、kubeconfig
# Build a standalone kubeconfig that authenticates with the dashboard token.
# (De-garbled: the original had spaces after every "=" and around the assignment.)
# FIX: the apiserver behind the VIP is reachable on the haproxy frontend port
# 6444 (see the haproxy config / kubeadm --control-plane-endpoint), not 6443.
kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/pki/ca.crt --server="https://10.0.0.10:6444" --embed-certs=true --kubeconfig=/root/dashboard-admin.conf
DEF_NS_ADMIN_TOKEN=$(kubectl get secret kubernetes-dashboard-token-wh5jw -n kubernetes-dashboard -o jsonpath={.data.token} | base64 -d)
kubectl config set-credentials dashboard-admin --token=$DEF_NS_ADMIN_TOKEN --kubeconfig=/root/dashboard-admin.conf
kubectl config set-context dashboard-admin@kubernetes --cluster=kubernetes --user=dashboard-admin --kubeconfig=/root/dashboard-admin.conf
kubectl config use-context dashboard-admin@kubernetes --kubeconfig=/root/dashboard-admin.conf