# Netplan YAML for the gateway machine.
# eth0 is using DHCP, hence no need to provision it here;
# eth1 gets the static LAN address 192.168.4.100.
network:
  version: 2
  renderer: NetworkManager
  ethernets:
    eth1:
      dhcp4: no
      addresses:
        - 192.168.4.100/24
# IPTables: turn the gateway into a NAT router for the 192.168.4.0/24 LAN.
# First enable IPv4 forwarding persistently in /etc/sysctl.conf.
$ sudo nano -l /etc/sysctl.conf
net.ipv4.ip_forward=1
$ sudo iptables -F                 # flush the default tables
$ sudo iptables -t nat -F          # flush the nat table
$ sudo iptables -P INPUT ACCEPT    # default behavior, just in case
$ sudo iptables -P FORWARD ACCEPT  # default behavior, just in case
# Masquerade LAN traffic going out through eth0 (the WAN/DHCP interface).
$ sudo iptables -t nat -A POSTROUTING -s 192.168.4.0/24 -o eth0 -j MASQUERADE
$ sudo iptables -t nat -L          # verify configuration
# Persist the rules across reboots.
$ sudo apt install iptables-persistent
$ sudo service netfilter-persistent status  # check the status of the service
# Netplan YAML for a LAN client: static address, with default route and
# DNS pointing at the gateway (192.168.4.100).
network:
  version: 2
  renderer: NetworkManager
  ethernets:
    eth0:
      dhcp4: no
      addresses:
        - 192.168.4.101/24
      # NOTE(review): gateway4 is deprecated in newer netplan releases;
      # prefer "routes: [{to: default, via: 192.168.4.100}]" — confirm version.
      gateway4: 192.168.4.100
      nameservers:
        addresses:
          - <dns address assigned by your ISP>
          - 8.8.8.8  # or other well-known DNS servers
# Note: In v1.22 and later, if the user does not set the cgroupDriver field
# under KubeletConfiguration, kubeadm defaults it to systemd — hence no
# configuration file needs to be created specifically.
# Update /etc/hosts to include controller-endpoint.
controller $ sudo nano -l /etc/hosts
192.168.4.<controller ip> controller-endpoint
# Uses the aliyun mirror for the control-plane images.
controller $ sudo kubeadm init --image-repository registry.aliyuncs.com/google_containers \
    --service-cidr 10.1.0.0/16 \
    --pod-network-cidr 10.2.0.0/16 \
    --apiserver-advertise-address 192.168.4.<controller ip> \
    --control-plane-endpoint controller-endpoint \
    --v=5
# Set up kubectl so it runs as the current user without sudo.
controller $ mkdir -p $HOME/.kube
controller $ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
controller $ sudo chown $(id -u):$(id -g) $HOME/.kube/config
Join Worker Nodes (加入 Worker Nodes)
# In case the original joining period expires, or you forgot the token,
# issue the following on the controller node:
#   controller $ kubeadm token list
# or recreate one:
#   controller $ sudo kubeadm token create --print-join-command
# On worker nodes, just copy & paste (remember to prefix with sudo).
worker $ sudo nano -l /etc/hosts
# NOTE(review): source said 192.168.19 here; 192.168.4 matches the LAN
# configured above — confirm which subnet is intended.
192.168.4.<controller ip> controller-endpoint
worker $ sudo kubeadm join <controller ip>:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>
Sanity Check
$ kubectl get nodes
NAME STATUS ROLES AGE VERSION
controller NotReady control-plane 4m42s v1.27.4
worker1 NotReady <none> 7s v1.27.3
$ kubectl get pod --namespace=kube-system
# Please note that coredns is not up, as no network plugin has been installed yet.
NAME READY STATUS RESTARTS AGE
coredns-7bdc4cb885-564sh 0/1 Pending 0 6m45s
coredns-7bdc4cb885-7l7tg 0/1 Pending 0 6m45s
etcd-controller 1/1 Running 1 (4m57s ago) 6m59s
kube-apiserver-controller 1/1 Running 1 (4m57s ago) 6m59s
kube-controller-manager-controller 1/1 Running 1 (4m57s ago) 7m
kube-proxy-kmbqm 1/1 Running 0 2m27s
kube-proxy-twqt7 1/1 Running 1 (4m57s ago) 6m45s
kube-scheduler-controller 1/1 Running 1 (4m57s ago) 6m59s