1. Cluster Information
| NodeName | Role | IP | Software |
| --- | --- | --- | --- |
| cka-master | Master | 192.168.100.10 | kubeadm, kubelet, kubectl, docker |
| cka-node1 | Node | 192.168.100.11 | kubeadm, kubelet, kubectl, docker |
| cka-node2 | Node | 192.168.100.12 | kubeadm, kubelet, kubectl, docker |
| ServiceNetwork | Network | 10.96.0.0/16 | |
| PodNetwork | Network | 10.244.0.0/16 | |
2. Cluster Environment Preparation
- Set the hostnames
#cka-master
hostnamectl set-hostname cka-master
#cka-node1
hostnamectl set-hostname cka-node1
#cka-node2
hostnamectl set-hostname cka-node2
Run all of the following steps on every node.
- Configure time synchronization
timedatectl set-timezone Asia/Shanghai
timedatectl set-ntp 1
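To confirm the timezone took effect and NTP synchronization is active, check timedatectl's status output:
timedatectl | grep -E 'Time zone|NTP'    # expect Asia/Shanghai and "NTP synchronized: yes"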
- Disable swap
# temporarily (takes effect immediately, lost on reboot)
swapoff -a
# permanently (comments out the swap entry in /etc/fstab)
sed -ri 's/.*swap.*/#&/' /etc/fstab
- Add hosts entries
cat >> /etc/hosts << EOF
192.168.100.10 cka-master
192.168.100.11 cka-node1
192.168.100.12 cka-node2
EOF
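A quick loop to verify that the names resolve and the nodes can reach each other:
for h in cka-master cka-node1 cka-node2; do ping -c 1 $h; done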
- Flush iptables / stop firewalld / disable SELinux
iptables -F
iptables -t nat -F
systemctl stop firewalld.service && systemctl disable firewalld.service
setenforce 0
sed -i '/^SELINUX=/c\SELINUX=disabled' /etc/selinux/config
- Tune kernel parameters
# write a drop-in file rather than overwriting /etc/sysctl.conf
cat > /etc/sysctl.d/k8s.conf << EOF
vm.swappiness = 0
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sysctl -p /etc/sysctl.d/k8s.conf
-------------
If you see errors like the following:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory
the br_netfilter kernel module is not loaded yet (the /proc/sys/net/bridge entries only exist once it is). Load it with `modprobe br_netfilter` (it is also loaded persistently in the next step), then re-run sysctl.
- Load kernel modules
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- br_netfilter
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
-----
chmod 755 /etc/sysconfig/modules/ipvs.modules && \
bash /etc/sysconfig/modules/ipvs.modules && \
lsmod | grep -E "ip_vs|nf_conntrack_ipv4"
------------------
Output similar to the following indicates success:
nf_conntrack_ipv4 15053 10
nf_defrag_ipv4 12729 1 nf_conntrack_ipv4
ip_vs_sh 12688 0
ip_vs_wrr 12697 0
ip_vs_rr 12600 0
ip_vs 145497 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 133095 7 ip_vs,nf_nat,nf_nat_ipv4,xt_conntrack,nf_nat_masquerade_ipv4,nf_conntrack_netlink,nf_conntrack_ipv4
libcrc32c 12644 4 xfs,ip_vs,nf_nat,nf_conntrack
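As an alternative on systemd-based distributions, the same modules can be declared under /etc/modules-load.d/ so systemd-modules-load loads them at boot; a minimal sketch:
cat > /etc/modules-load.d/k8s.conf << EOF
br_netfilter
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF
systemctl restart systemd-modules-load.service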
- Configure yum repositories
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo
- Reboot all servers
init 6
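After the nodes come back up, verify that the settings survived the reboot:
free -h | grep -i swap                        # Swap line should read 0B
getenforce                                    # should print Disabled
sysctl net.bridge.bridge-nf-call-iptables     # should print = 1
lsmod | grep br_netfilter                     # module should be listed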
3. Install Docker
- Install required system tools
yum install -y yum-utils device-mapper-persistent-data lvm2
- Add the Docker repository
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
- Refresh the cache and install Docker CE
yum makecache fast && yum -y install docker-ce
- Start Docker and enable it at boot
systemctl restart docker && systemctl enable docker
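kubeadm expects the kubelet and the container runtime to use the same cgroup driver, and kubeadm init prints a warning when Docker is left on its default cgroupfs driver. An optional but commonly recommended tweak is to switch Docker to the systemd driver before installing Kubernetes:
cat > /etc/docker/daemon.json << EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl daemon-reload && systemctl restart docker
docker info | grep -i 'cgroup driver'         # should now report systemd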
4. Install the Kubernetes Cluster
- Add the Kubernetes repository
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
- Install kubeadm, kubelet, and kubectl, and enable kubelet at boot
yum install -y kubelet kubeadm kubectl
systemctl enable kubelet && systemctl start kubelet
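Note that the command above installs the newest versions available in the repo; since this walkthrough targets 1.20.2, you can pin the packages explicitly (assuming the aliyun repo still carries these versions):
yum install -y kubelet-1.20.2 kubeadm-1.20.2 kubectl-1.20.2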
- List the image names required by the Kubernetes cluster
# latest stable version
kubeadm config images list --kubernetes-version=stable
# a specific version
kubeadm config images list --kubernetes-version=1.20.2
k8s.gcr.io/kube-apiserver:v1.20.2
k8s.gcr.io/kube-controller-manager:v1.20.2
k8s.gcr.io/kube-scheduler:v1.20.2
k8s.gcr.io/kube-proxy:v1.20.2
k8s.gcr.io/pause:3.2
k8s.gcr.io/etcd:3.4.13-0
k8s.gcr.io/coredns:1.7.0
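Because k8s.gcr.io is unreachable from mainland China, the images can be pre-pulled from the aliyun mirror on every node, which makes the subsequent kubeadm init faster and more reliable:
kubeadm config images pull --kubernetes-version=1.20.2 --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers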
- Initialize the Kubernetes cluster
kubeadm init --kubernetes-version 1.20.2 --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers --service-cidr=10.96.0.0/16 --pod-network-cidr=10.244.0.0/16
---------------------------------------
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.100.10:6443 --token kukq9v.flujy8291in9nbhv \
--discovery-token-ca-cert-hash sha256:9f7cf40829126eea596258c3c400b13e4f7a7c86a5416c470a073ce994888b42
- Run the post-init kubectl setup on the master node
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
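kubectl now works, but the nodes will stay NotReady until a Pod network add-on is deployed (Flannel, section 5):
kubectl get nodes    # STATUS shows NotReady until the CNI plugin is installed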
- Join worker nodes to the cluster
kubeadm join 192.168.100.10:6443 --token kukq9v.flujy8291in9nbhv \
--discovery-token-ca-cert-hash sha256:9f7cf40829126eea596258c3c400b13e4f7a7c86a5416c470a073ce994888b42
- Generate a new token and join command
# run on the master node
kubeadm token create --print-join-command
kubeadm join 192.168.100.10:6443 --token f7xbb9.i5qdltuwaq6ir9ud --discovery-token-ca-cert-hash sha256:9f7cf40829126eea596258c3c400b13e4f7a7c86a5416c470a073ce994888b42
- Add a new worker node
kubeadm join 192.168.100.10:6443 --token f7xbb9.i5qdltuwaq6ir9ud \
--discovery-token-ca-cert-hash sha256:9f7cf40829126eea596258c3c400b13e4f7a7c86a5416c470a073ce994888b42
- List current tokens
# run on the master node
kubeadm token list
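Tokens expire after 24 hours by default, while the CA certificate hash never changes; if you have a valid token but lost the hash, it can be recomputed on the master with the command from the kubeadm documentation:
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | \
openssl rsa -pubin -outform der 2>/dev/null | \
openssl dgst -sha256 -hex | sed 's/^.* //'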
- Remove the master taint so the master can also schedule Pods
kubectl taint nodes cka-master node-role.kubernetes.io/master-
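Verify that the taint is gone:
kubectl describe node cka-master | grep Taints    # should print <none>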
5. Install Flannel
- Download the kube-flannel.yml file (requires unrestricted access to GitHub)
https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
- Contents of kube-flannel.yml
# kubectl apply -f kube-flannel.yml
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
  - configMap
  - secret
  - emptyDir
  - hostPath
  allowedHostPaths:
  - pathPrefix: "/etc/cni/net.d"
  - pathPrefix: "/etc/kube-flannel"
  - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
- apiGroups: ['extensions']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: kubernetes.io/os
                operator: In
                values:
                - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
      - operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        #image: quay.io/coreos/flannel:v0.13.1-rc1
        image: registry.cn-chengdu.aliyuncs.com/inodb/flannel:v0.13.1-rc1 # switched to a China mirror
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conflist
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        #image: quay.io/coreos/flannel:v0.13.1-rc1
        image: registry.cn-chengdu.aliyuncs.com/inodb/flannel:v0.13.1-rc1 # switched to a China mirror
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        resources:
          requests:
            cpu: "100m"
            memory: "50Mi"
          limits:
            cpu: "100m"
            memory: "50Mi"
        securityContext:
          privileged: false
          capabilities:
            add: ["NET_ADMIN", "NET_RAW"]
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run/flannel
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run/flannel
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg
6. Check the Kubernetes Cluster Status
kubectl get nodes
kubectl get pods -n kube-system
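All nodes should move to Ready once the Flannel DaemonSet is running; the flannel Pods can be selected directly via the app=flannel label from the manifest above:
kubectl get pods -n kube-system -l app=flannel -o wide
kubectl get nodes -o wide    # all three nodes should report Ready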
7. Image Pull Script
#!/bin/bash
# Pull the images required by kubeadm from the aliyun mirror, re-tag them
# as k8s.gcr.io/* so kubeadm finds them locally, then drop the mirror tags.
url=registry.cn-hangzhou.aliyuncs.com/google_containers
version=v1.20.2
images=(`kubeadm config images list --kubernetes-version=$version | awk -F '/' '{print $2}'`)
for imagename in ${images[@]} ; do
  docker pull $url/$imagename
  docker tag $url/$imagename k8s.gcr.io/$imagename
  docker rmi -f $url/$imagename
done
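If the worker nodes have no internet access at all, the same images can be exported once and imported on each offline node; a minimal sketch (the archive name is arbitrary):
# on a machine that already has the images
docker save -o k8s-images.tar $(kubeadm config images list --kubernetes-version=v1.20.2)
# on each offline node
docker load -i k8s-images.tar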