尉氏docker安装服务kubeadm方式
准备三台机子:
192.168.15.31(master)
192.168.15.32(node1)
192.168.15.33(node2)
注:防火墙、SELinux 已经关闭了
1.三台机子统一进行。关闭swap分区
[root@Centos7 ~]# vim /etc/fstab
#UUID=80531769-f17b-4c36-82b5-7ea321c5f853 swap swap defaults 0 0
[root@Centos7 ~]# swapoff -a
[root@Centos7 ~]# echo 'KUBELET_EXTRA_ARGS="--fail-swap-on=false"' > /etc/sysconfig/kubelet
2.在master修改主机名并作域名解析
[root@Centos7 ~]# vim /etc/hosts
192.168.15.31 k8s-master-01 m1
192.168.15.32 k8s-node-01 n1
192.168.15.33 k8s-node-02 n2
3.三台机子上都修改主机名
[root@Centos7 ~]# hostnamectl set-hostname k8s-master-01
[root@Centos7 ~]# hostnamectl set-hostname k8s-node-01
[root@Centos7 ~]# hostnamectl set-hostname k8s-node-02
4.三台机子上都设置免密登录
[root@k8s-master-01 ~]# for i in m1 n1 n2;do ssh-copy-id -i ~/.ssh/id_rsa.pub root@$i;done
5.三台机都这样升级:升级所有包同时也升级软件和系统内核
[root@k8s-master-01 ~]# yum update -y
[root@k8s-node-01 ~]# yum update -y
[root@k8s-node-02 ~]# yum update -y
6.三台机子都这样搞,更换yum源头换成国内的阿里云的,这样快 。同时更新系统
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
curl -o /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
yum update -y --exclude=kernel*
7.三台机都这样升级:升级所有包同时也升级软件和系统内核
[root@k8s-master-01 ~]# yum update -y
[root@k8s-master-01 ~]# cat /etc/redhat-release
CentOS Linux release 7.9.2009 (Core)
8.登录下面网址分别下载内核5.4.107内核版本(内核要求是 4.18+,如果是CentOS 8
则不需要升级内核)
https://elrepo.org/linux/kernel/el7/x86_64/RPMS/
[root@k8s-master-01 ~]# wget https://elrepo.org/linux/kernel/el7/x86_64/RPMS/kernel-lt-5.4.107-1.el7.elrepo.x86_64.rpm
[root@k8s-master-01 ~]# wget https://elrepo.org/linux/kernel/el7/x86_64/RPMS/kernel-lt-devel-5.4.107-1.el7.elrepo.x86_64.rpm
[root@k8s-master-01 ~]# ll
-rw-r--r-- 1 root root 52402056 3月 20 23:06 kernel-lt-5.4.107-1.el7.elrepo.x86_64.rpm
-rw-r--r-- 1 root root 13462492 3月 20 23:06 kernel-lt-devel-5.4.107-1.el7.elrepo.x86_64.rpm
9.将三台机子这两个下好的内核rpm包放在/opt/下面 并且本地安装
[root@k8s-master-01 ~]# mv kernel-lt-* /opt/
[root@k8s-master-01 ~]# cd /opt
[root@k8s-master-01 opt]# ls
kernel-lt-5.4.107-1.el7.elrepo.x86_64.rpm kernel-lt-devel-5.4.107-1.el7.elrepo.x86_64.rpm
[root@k8s-master-01 ~]#yum localinstall -y kernel-lt*
已安装:
kernel-lt.x86_64 0:5.4.107-1.el7.elrepo kernel-lt-devel.x86_64 0:5.4.107-1.el7.elrepo
grub2-set-default 0 && grub2-mkconfig -o /etc/grub2.cfg
grubby --default-kernel
10.系统内核参数优化
1.# 三台机子安装 IPVS(ipvs 是系统内核中的一个模块,其网络转发性能很高。一般情况下,我们首选 ipvs)
[root@k8s-master-01 ~]# yum install -y conntrack-tools ipvsadm ipset conntrack libseccomp
2.# 加载 IPVS 模块
# Write a boot-time loader script for the IPVS kernel modules.
# The heredoc delimiter EOF is unquoted, so $-expansions happen NOW unless
# escaped: \${var} and \$? are escaped so they survive into the generated
# script. (The original left $? unescaped, so it expanded at write time and
# the generated test became a constant, loading modules unconditionally.)
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
/sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
if [ \$? -eq 0 ]; then
/sbin/modprobe \${kernel_module}
fi
done
EOF
3.# 执行以下命令加载模块并确认:
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
# Kernel parameters required/recommended for Kubernetes nodes.
# Fixed misspelled keys from the original (sysctl would reject them):
#   net.ipv4.tcp.keepaliv.probes -> net.ipv4.tcp_keepalive_probes
#   net.ipv4.tcp.max_tw_buckets  -> net.ipv4.tcp_max_tw_buckets
#   net.ipv4.tcp.max_orphans     -> net.ipv4.tcp_max_orphans
#   net.ipv4.top_timestamps      -> net.ipv4.tcp_timestamps
# Also removed a duplicated tcp_max_syn_backlog line.
cat > /etc/sysctl.d/k8s.conf << EOF
# bridged traffic must be visible to iptables, and routing must be enabled
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
# NOTE(review): net.ipv4.ip_conntrack_max is the pre-2.6.32 legacy key; on
# kernel 4.x+/5.4 the conntrack module exposes net.netfilter.nf_conntrack_max
# (requires nf_conntrack loaded first — done by ipvs.modules above).
net.netfilter.nf_conntrack_max = 65536
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
4# 立即生效
sysctl --system
5.#重启虚拟机
reboot
6.#检验
11.三台机子安装基础软件方便我们的日常使用
[root@k8s-master-01 ~]# yum install wget expect vim net-tools ntp bash-completion ipvsadm ipset jq iptables conntrack sysstat libseccomp -y
12三台机子上安装Docker(Docker 主要是作为 k8s 管理的常用的容器工具之一。)(centos7上这样安装至于8可以其他安装,在老师的版本上有)
#注意将这些全部复制到命令行一次性安装
# Install Docker CE: yum prerequisites, then the Aliyun docker-ce repo,
# then the engine itself.
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install docker-ce -y
sudo mkdir -p /etc/docker
# Configure an Aliyun registry mirror; the quoted 'EOF' delimiter keeps the
# JSON body literal (no shell expansion inside the heredoc).
sudo tee /etc/docker/daemon.json <<-'EOF'
{ "registry-mirrors": ["https://8mh75mhz.mirror.aliyuncs.com"] }
EOF
# Reload unit files, restart Docker to pick up daemon.json, and enable at boot.
sudo systemctl daemon-reload ; systemctl restart docker;systemctl enable --now docker.service
13.结果显示:(其实我用了老师的阿里云加速器)
完毕!
[root@k8s-master-01 ~]# sudo mkdir -p /etc/docker
[root@k8s-master-01 ~]# sudo tee /etc/docker/daemon.json <<-'EOF'
> { "registry-mirrors": ["https://8mh75mhz.mirror.aliyuncs.com"] }
> EOF
{ "registry-mirrors": ["https://8mh75mhz.mirror.aliyuncs.com"] }
[root@k8s-master-01 ~]# sudo systemctl daemon-reload ; systemctl restart docker;systemctl enable --now docker.service
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
14.三台机子上都同步集群时间
在集群当中,时间是一个很重要的概念,一旦集群当中某台机器时间跟集群时间不一致,可能会导致集群面 临很多问题。所以,在部署集群之前,需要同步集群当中的所有机器的时间
root@k8s-master-01 ~]# ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
[root@k8s-master-01 ~]# echo 'Asia/Shanghai' > /etc/timezone
[root@k8s-master-01 ~]# ntpdate time2.aliyun.com
25 Mar 11:26:57 ntpdate[2254]: step time server 203.107.6.88 offset -28800.031671 sec
[root@k8s-master-01 ~]# crontab -e
# 写入定时任务 */1 * * * * ntpdate time2.aliyun.com > /dev/null 2>&1
[root@k8s-master-01 ~]# crontab -l
*/1 * * * * ntpdate time2.aliyun.com > /dev/null 2>&1
15.三台机子安装/kubernetes
还是一次性丢入命令行
# Add the Aliyun Kubernetes yum repository (unquoted EOF is safe here: the
# body contains nothing the shell would expand).
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Put SELinux in permissive mode for the current boot (config already disabled).
setenforce 0
yum install -y kubelet kubeadm kubectl
# kubelet will crash-loop until kubeadm init/join runs — that is expected.
systemctl enable kubelet && systemctl start kubelet
16.安装结果显示
已安装:
kubeadm.x86_64 0:1.20.5-0 kubectl.x86_64 0:1.20.5-0 kubelet.x86_64 0:1.20.5-0
作为依赖被安装:
cri-tools.x86_64 0:1.13.0-0 kubernetes-cni.x86_64 0:0.8.7-0 socat.x86_64 0:1.7.3.2-2.el7
完毕!
17.设置开机自启和启动 kubelet
systemctl enable kubelet && systemctl start kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
18.集群初始化(从现在开始就在主节点master操作啦,这个仓库是老师搭建的)
# Initialize the control plane (master only):
#   --image-repository    pull control-plane images from this Aliyun mirror
#   --kubernetes-version  version of the control-plane images to use
#   --service-cidr        virtual IP range for Services (must not overlap hosts)
#   --pod-network-cidr    pod range; 10.244.0.0/16 matches the flannel
#                         net-conf.json in the manifest appended below
kubeadm init \
--image-repository=registry.cn-hangzhou.aliyuncs.com/k8sos \
--kubernetes-version=v1.20.2 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16
19.集群初始化
kubeadm init \
--image-repository=registry.cn-hangzhou.aliyuncs.com/k8sos \
--kubernetes-version=v1.20.2 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16
20.运行结果显示成功啦。镜像也下载成功了
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.15.31:6443 --token xgynpw.ieuzvwfi3towjudd \
--discovery-token-ca-cert-hash sha256:e7446e9d08111a7ef94e4ce7c6a0f1c597312e4fe14de1bd367d92217bbf30aa
[root@k8s-master-01 ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
registry.cn-hangzhou.aliyuncs.com/k8sos/kube-proxy v1.20.2 43154ddb57a8 2 months ago 118MB
registry.cn-hangzhou.aliyuncs.com/k8sos/kube-controller-manager v1.20.2 a27166429d98 2 months ago 116MB
registry.cn-hangzhou.aliyuncs.com/k8sos/kube-apiserver v1.20.2 a8c2fdb8bf76 2 months ago 122MB
registry.cn-hangzhou.aliyuncs.com/k8sos/kube-scheduler v1.20.2 ed2c44fbdd78 2 months ago 46.4MB
registry.cn-hangzhou.aliyuncs.com/k8sos/etcd 3.4.13-0 0369cf4303ff 6 months ago 253MB
registry.cn-hangzhou.aliyuncs.com/k8sos/coredns 1.7.0 bfe3a36ebd25 9 months ago 45.2MB
registry.cn-hangzhou.aliyuncs.com/k8sos/pause 3.2 80d28bedfe5d 13 months ago 683kB
21.注意:运行一下成功结果显示的要用这个软件的命令
#建立用户集群权限
[root@k8s-master-01 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master-01 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
#这个是以root用户
[root@k8s-master-01 ~]# export KUBECONFIG=/etc/kubernetes/admin.conf
- 安装网络插件
里面内容后附
[root@k8s-master-01 ~]# vi flannel.yaml
[root@k8s-master-01 ~]# cat flannel.yaml |grep image
image: registry.cn-hangzhou.aliyuncs.com/alvinos/flanned:v0.13.1-rc1
image: registry.cn-hangzhou.aliyuncs.com/alvinos/flanned:v0.13.1-rc1
[root@k8s-master-01 ~]# kubectl apply -f flannel.yaml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
23.用这个命令检查集群创建成功了没 主要看1/1是否都是1/1
[root@k8s-master-01 ~]# kubectl get pods -o wide -n kube-system
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
coredns-f68b4c98f-5wwv8 1/1 Running 0 174m 10.244.0.2 k8s-master-01 <none> <none>
coredns-f68b4c98f-849km 1/1 Running 0 174m 10.244.0.3 k8s-master-01 <none> <none>
etcd-k8s-master-01 1/1 Running 0 174m 192.168.15.31 k8s-master-01 <none> <none>
kube-apiserver-k8s-master-01 1/1 Running 0 174m 192.168.15.31 k8s-master-01 <none> <none>
kube-controller-manager-k8s-master-01 1/1 Running 0 174m 192.168.15.31 k8s-master-01 <none> <none>
kube-flannel-ds-6kmjt 1/1 Running 0 2m50s 192.168.15.31 k8s-master-01 <none> <none>
kube-proxy-pnzqc 1/1 Running 0 174m 192.168.15.31 k8s-master-01 <none> <none>
kube-scheduler-k8s-master-01 1/1 Running 0 174m 192.168.15.31 k8s-master-01 <none> <none>
24.在master上检查node节点安装成功否,发现只有一个,将#号后面的内容分别在n1和n2上执行一次
[root@k8s-master-01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master-01 Ready control-plane,master 177m v1.20.5
##号后面的内容分别在n1和n2上执行一次
kubeadm join 192.168.15.31:6443 --token xgynpw.ieuzvwfi3towjudd \
--discovery-token-ca-cert-hash sha256:e7446e9d08111a7ef94e4ce7c6a0f1c597312e4fe14de1bd367d92217bbf30aa
# 再检查一次节点安装情况,显示ready都是,说明ok了
[root@k8s-master-01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master-01 Ready control-plane,master 3h1m v1.20.5
k8s-node-01 Ready <none> 2m23s v1.20.5
k8s-node-02 Ready <none> 2m18s v1.20.5
25.测试集群DNS
[root@k8s-master-01 ~]# kubectl run test -it --rm --image=busybox:1.28.3
/ # nslookup kubernetes
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
Name: kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
26.成功啦!…
[root@k8s-master-01 ~]# kubectl run test -it --rm --image=busybox:1.28.3
If you don't see a command prompt, try pressing enter.
/ # nslookup kubernetes
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local
Name: kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local
附带:
- vi flannel.yaml
# flannel CNI manifest (image mirrored to Aliyun, flannel v0.13.1-rc1).
# Reconstructed: markdown extraction had turned every YAML "- " list marker
# into numbered-list items ("1. configMap", "9. apiGroups", ...) and stripped
# indentation, which kubectl would reject. Values are unchanged.
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: psp.flannel.unprivileged
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
  privileged: false
  volumes:
    - configMap
    - secret
    - emptyDir
    - hostPath
  allowedHostPaths:
    - pathPrefix: "/etc/cni/net.d"
    - pathPrefix: "/etc/kube-flannel"
    - pathPrefix: "/run/flannel"
  readOnlyRootFilesystem: false
  # Users and groups
  runAsUser:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  # Privilege Escalation
  allowPrivilegeEscalation: false
  defaultAllowPrivilegeEscalation: false
  # Capabilities
  allowedCapabilities: ['NET_ADMIN', 'NET_RAW']
  defaultAddCapabilities: []
  requiredDropCapabilities: []
  # Host namespaces
  hostPID: false
  hostIPC: false
  hostNetwork: true
  hostPorts:
    - min: 0
      max: 65535
  # SELinux
  seLinux:
    # SELinux is unused in CaaSP
    rule: 'RunAsAny'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
rules:
  - apiGroups: ['extensions']
    resources: ['podsecuritypolicies']
    verbs: ['use']
    resourceNames: ['psp.flannel.unprivileged']
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
  - kind: ServiceAccount
    name: flannel
    namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "flannel",
          "delegate": {
            "hairpinMode": true,
            "isDefaultGateway": true
          }
        },
        {
          "type": "portmap",
          "capabilities": {
            "portMappings": true
          }
        }
      ]
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  selector:
    matchLabels:
      app: flannel
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: kubernetes.io/os
                    operator: In
                    values:
                      - linux
      hostNetwork: true
      priorityClassName: system-node-critical
      tolerations:
        - operator: Exists
          effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
        # Copies the CNI config from the ConfigMap mount into the host's
        # /etc/cni/net.d before the flannel daemon starts.
        - name: install-cni
          image: registry.cn-hangzhou.aliyuncs.com/alvinos/flanned:v0.13.1-rc1
          command:
            - cp
          args:
            - -f
            - /etc/kube-flannel/cni-conf.json
            - /etc/cni/net.d/10-flannel.conflist
          volumeMounts:
            - name: cni
              mountPath: /etc/cni/net.d
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      containers:
        - name: kube-flannel
          image: registry.cn-hangzhou.aliyuncs.com/alvinos/flanned:v0.13.1-rc1
          command:
            - /opt/bin/flanneld
          args:
            - --ip-masq
            - --kube-subnet-mgr
          resources:
            requests:
              cpu: "100m"
              memory: "50Mi"
            limits:
              cpu: "100m"
              memory: "50Mi"
          securityContext:
            privileged: false
            capabilities:
              add: ["NET_ADMIN", "NET_RAW"]
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          volumeMounts:
            - name: run
              mountPath: /run/flannel
            - name: flannel-cfg
              mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run/flannel
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg