createrepo --update /var/localrepo/
# 禁用 firewall 和 swap
[root@master ~]# sed '/swap/d' -i /etc/fstab
[root@master ~]# swapoff -a
[root@master ~]# dnf remove -y firewalld-*
[root@master ~]# vim /etc/hosts
192.168.1.30 harbor
192.168.1.50 master
192.168.1.51 node-0001
192.168.1.52 node-0002
192.168.1.53 node-0003
192.168.1.54 node-0004
192.168.1.55 node-0005
[root@master ~]# dnf install -y kubeadm kubelet kubectl containerd.io ipvsadm ipset iproute-tc
[root@master ~]# containerd config default >/etc/containerd/config.toml
[root@master ~]# vim /etc/containerd/config.toml
61: sandbox_image = "harbor:443/k8s/pause:3.9"
125: SystemdCgroup = true
154 行新插入:
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = ["https://harbor:443"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."harbor:443"]
endpoint = ["https://harbor:443"]
[plugins."io.containerd.grpc.v1.cri".registry.configs."harbor:443".tls]
insecure_skip_verify = true
[root@master ~]# systemctl enable --now kubelet containerd
配置内核参数
# 加载内核模块
[root@master ~]# cat >/etc/modules-load.d/containerd.conf<<EOF
overlay
br_netfilter
xt_conntrack
EOF
[root@master ~]# systemctl start systemd-modules-load.service
# 设置内核参数
[root@master ~]# cat >/etc/sysctl.d/99-kubernetes-cri.conf<<EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.netfilter.nf_conntrack_max = 1000000
EOF
[root@master ~]# sysctl -p /etc/sysctl.d/99-kubernetes-cri.conf
导入 k8s 镜像
# 拷贝本阶段 kubernetes/init 目录到 master
[root@ecs-proxy s4]# rsync -av kubernetes/init 192.168.1.50:./
[root@master ~]# dnf install -y docker-ce
[root@master ~]# mkdir -p /etc/docker
[root@master ~]# vim /etc/docker/daemon.json
{
"registry-mirrors":["https://harbor:443"],
"insecure-registries":["harbor:443"]
}
[root@master ~]# systemctl enable --now docker
# 登录 harbor 仓库,上传镜像
[root@master ~]# docker login harbor:443
Username: admin
Password: ********
Login Succeeded
[root@master ~]# docker load -i init/v1.26.0.tar.xz
[root@master ~]# docker images|while read i t _;do
[[ "${t}" == "TAG" ]] && continue
[[ "${i}" =~ ^"harbor:443/".+ ]] && continue
docker tag ${i}:${t} harbor:443/k8s/${i##*/}:${t}
docker push harbor:443/k8s/${i##*/}:${t}
docker rmi ${i}:${t} harbor:443/k8s/${i##*/}:${t}
done
设置 Tab 键
[root@master ~]# source <(kubeadm completion bash|tee /etc/bash_completion.d/kubeadm)
[root@master ~]# source <(kubectl completion bash|tee /etc/bash_completion.d/kubectl)
7、master 安装
# 测试系统环境
[root@master ~]# kubeadm init --config=init/init.yaml --dry-run 2>error.log
[root@master ~]# cat error.log
[root@master ~]# rm -rf error.log /etc/kubernetes/tmp
# 主控节点初始化
[root@master ~]# kubeadm init --config=init/init.yaml |tee init/init.log
# 管理授权
[root@master ~]# mkdir -p $HOME/.kube
[root@master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
# 验证安装结果
[root@master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master NotReady control-plane 19s v1.26.0
配置控制节点:
k8s 自身通过 iptables/ipvs 管理网络规则,与 firewalld 下发的规则会冲突,所以安装前要卸载/禁用 firewalld
导入k8s镜像
批量上传镜像
[root@master ~]# dnf install -y docker-ce
[root@master ~]# mkdir -p /etc/docker
[root@master ~]# vim /etc/docker/daemon.json
{
"registry-mirrors":["https://harbor:443"],
"insecure-registries":["harbor:443"]
}
[root@master ~]# systemctl enable --now docker
# 登录 harbor 仓库,上传镜像
[root@master ~]# docker login harbor:443
Username: admin
Password: ********
Login Succeeded
[root@master ~]# docker load -i init/v1.26.0.tar.xz
[root@master ~]# docker images|while read i t _;do
[[ "${t}" == "TAG" ]] && continue
[[ "${i}" =~ ^"harbor:443/".+ ]] && continue
docker tag ${i}:${t} harbor:443/k8s/${i##*/}:${t}
docker push harbor:443/k8s/${i##*/}:${t}
docker rmi ${i}:${t} harbor:443/k8s/${i##*/}:${t}
done
上传镜像的脚本
上传镜像
[root@master ~]# cd plugins/calico
[root@master calico]# docker load -i calico.tar.xz
[root@master calico]# docker images|while read i t _;do
[[ "${t}" == "TAG" ]] && continue
[[ "${i}" =~ ^"harbor:443/".+ ]] && continue
docker tag ${i}:${t} harbor:443/plugins/${i##*/}:${t}
docker push harbor:443/plugins/${i##*/}:${t}
docker rmi ${i}:${t} harbor:443/plugins/${i##*/}:${t}
done
循环传
<<< here-string:把一个字符串作为命令的标准输入
<   标准输入重定向:从文件读取标准输入
<<  here-document:多行输入,以指定的终止符(如 EOF)结束
6、设置 Tab 键
[root@master ~]# source <(kubeadm completion bash|tee /etc/bash_completion.d/kubeadm)
[root@master ~]# source <(kubectl completion bash|tee /etc/bash_completion.d/kubectl)
7、master 安装
# 测试系统环境
[root@master ~]# kubeadm init --config=init/init.yaml --dry-run 2>error.log
[root@master ~]# cat error.log
[root@master ~]# rm -rf error.log /etc/kubernetes/tmp
# 主控节点初始化
[root@master ~]# kubeadm init --config=init/init.yaml |tee init/init.log
# 管理授权
[root@master ~]# mkdir -p $HOME/.kube
[root@master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
授权命令以上
# 验证安装结果
[root@master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master NotReady control-plane 19s v1.26.0
NotReady 表示节点网络不通(尚未安装网络插件,安装 calico 后变为 Ready)
获取calico
上传calico镜像
[root@master ~]# cd plugins/calico
[root@master calico]# docker load -i calico.tar.xz
[root@master calico]# docker images|while read i t _;do
[[ "${t}" == "TAG" ]] && continue
[[ "${i}" =~ ^"harbor:443/".+ ]] && continue
docker tag ${i}:${t} harbor:443/plugins/${i##*/}:${t}
docker push harbor:443/plugins/${i##*/}:${t}
docker rmi ${i}:${t} harbor:443/plugins/${i##*/}:${t}
done
安装 calico:解决镜像仓库
[root@master calico]# sed -ri 's,^(\s*image: )(.*/)?(.+),\1harbor:443/plugins/\3,' calico.yaml
4443: image: docker.io/calico/cni:v3.25.0
4471: image: docker.io/calico/cni:v3.25.0
4514: image: docker.io/calico/node:v3.25.0
4540: image: docker.io/calico/node:v3.25.0
4757: image: docker.io/calico/kube-controllers:v3.25.0
[root@master calico]# kubectl apply -f calico.yaml
[root@master calico]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master Ready control-plane 23m v1.26.0
会自己设置一个网络,显示为Ready则为安装成功
安装计算节点
1、获取凭证
# 查看 token
[root@master ~]# kubeadm token list
TOKEN TTL EXPIRES
abcdef.0123456789abcdef 23h 2022-04-12T14:04:34Z
# 删除 token
[root@master ~]# kubeadm token delete abcdef.0123456789abcdef
bootstrap token "abcdef" deleted
# 创建 token
[root@master ~]# kubeadm token create --ttl=0 --print-join-command
kubeadm join 192.168.1.50:6443 --token fhf6gk.bhhvsofvd672yd41 --discovery-token-ca-cert-hash sha256:ea07de5929dab8701c1bddc347155fe51c3fb6efd2ce8a4177f6dc03d5793467
# 获取token_hash
# 1、查看安装日志 2、在创建token时候显示 3、使用 openssl 计算得到
[root@master ~]# openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt |openssl rsa -pubin -outform der |openssl dgst -sha256 -hex
2、node 安装
参考 控制节点安装步骤,在计算节点上完成步骤 2、3、4
[root@node ~]# 参考控制节点安装步骤2
[root@node ~]# 参考控制节点安装步骤3
[root@node ~]# 参考控制节点安装步骤4
[root@node ~]# kubeadm join 192.168.1.50:6443 --token fhf6gk.bhhvsofvd672yd41 --discovery-token-ca-cert-hash sha256:ea07de5929dab8701c1bddc347155fe51c3fb6efd2ce8a4177f6dc03d5793467
#------------------------ 在 master 节点上验证---------------------------
[root@master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master Ready control-plane 76m v1.26.0
node-0001 Ready <none> 61s v1.26.0
3、批量部署
拷贝 kubernetes/nodejoin 到跳板机
[root@ecs-proxy s4]# cp -a kubernetes/nodejoin /root/
[root@ecs-proxy s4]# cd ~/nodejoin/
[root@ecs-proxy nodejoin]# vim nodeinit.yaml
... ...
vars:
master: '192.168.1.50:6443'
token: '这里改成你自己的token'
token_hash: 'sha256:这里改成你自己的token ca hash'
... ...
[root@ecs-proxy nodejoin]# ansible-playbook nodeinit.yaml
查看集群状态
# 验证节点工作状态
[root@master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master Ready control-plane 99m v1.26.0
node-0001 Ready <none> 23m v1.26.0
node-0002 Ready <none> 57s v1.26.0
node-0003 Ready <none> 57s v1.26.0
node-0004 Ready <none> 57s v1.26.0
node-0005 Ready <none> 57s v1.26.0
# 验证容器工作状态
[root@master ~]# kubectl -n kube-system get pods
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-fc945b5f7-p4xnj 1/1 Running 0 77m
calico-node-6s8k2 1/1 Running 0 59s
calico-node-bxwdd 1/1 Running 0 59s
calico-node-d5g6x 1/1 Running 0 77m
calico-node-lfwdh 1/1 Running 0 59s
calico-node-qnhxr 1/1 Running 0 59s
calico-node-sjngw 1/1 Running 0 24m
coredns-844c6bb88b-89lzt 1/1 Running 0 59m
coredns-844c6bb88b-qpbvk 1/1 Running 0 59m
etcd-master 1/1 Running 0 70m
kube-apiserver-master 1/1 Running 0 70m
kube-controller-manager-master 1/1 Running 0 70m
kube-proxy-5xjzw 1/1 Running 0 59s
kube-proxy-9mbh5 1/1 Running 0 59s
kube-proxy-g2pmp 1/1 Running 0 99m
kube-proxy-l7lpk 1/1 Running 0 24m
kube-proxy-m6wfj 1/1 Running 0 59s
kube-proxy-vqtt8 1/1 Running 0 59s
kube-scheduler-master 1/1 Running 0 70m
主要是在安装和部署时使用的kubeadm
kubeadm config images list 镜像管理命令,列出当前版本所需要的镜像
安装管理控制节点集群为init
join 安装计算节点
生成init模板文件
计算节点的安装
有凭证才能加入集群
[root@master ~]# kubeadm token create --ttl=0 (无限期)--print-join-command(打印出来怎么使用)
kubeadm join 192.168.1.50:6443 --token fhf6gk.bhhvsofvd672yd41 --discovery-token-ca-cert-hash sha256:ea07de5929dab8701c1bddc347155fe51c3fb6efd2ce8a4177f6dc03d5793467
节点退出集群使用 kubeadm reset;一个节点同一时间只能加入一个集群
kubectl -n kube-system get pods
验证容器工作状态
集群管理命令
help
# 查看帮助命令信息
[root@master ~]# kubectl help version
Print the client and server version information for the current context.
Examples:
# Print the client and server versions for the current context
kubectl version
... ...
cluster-info
# 查看集群状态信息
[root@master ~]# kubectl cluster-info
Kubernetes control plane is running at https://192.168.1.50:6443
CoreDNS is running at https://192.168.1.50:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
... ...
version
# 查看服务端与客户端版本信息
[root@master ~]# kubectl version
WARNING: This version information is deprecated and will be replaced with the output from kubectl version --short. Use --output=yaml|json to get the full version.
Client Version: version.Info{Major:"1", Minor:"26", GitVersion:"v1.26.0", GitCommit:"b46a3f887ca979b1a5d14fd39cb1af43e7e5d12d", GitTreeState:"clean", BuildDate:"2022-12-08T19:58:30Z", GoVersion:"go1.19.4", Compiler:"gc", Platform:"linux/amd64"}
Kustomize Version: v4.5.7
Server Version: version.Info{Major:"1", Minor:"26", GitVersion:"v1.26.0", GitCommit:"b46a3f887ca979b1a5d14fd39cb1af43e7e5d12d", GitTreeState:"clean", BuildDate:"2022-12-08T19:51:45Z", GoVersion:"go1.19.4", Compiler:"gc", Platform:"linux/amd64"}
api-resources
# 查看资源对象类型
[root@master ~]# kubectl api-resources
NAME SHORTNAMES APIVERSION NAMESPACED KIND
bindings v1 true Binding
endpoints ep v1 true Endpoints
events ev v1 true Event
... ...
api-versions
# 查看资源对象版本
[root@master ~]# kubectl api-versions
admissionregistration.k8s.io/v1
apiextensions.k8s.io/v1
apiregistration.k8s.io/v1
apps/v1
... ...
config
# 查看当前认证使用的用户及证书
[root@master ~]# kubectl config get-contexts
CURRENT NAME CLUSTER AUTHINFO
* kubernetes-admin@kubernetes kubernetes kubernetes-admin
# 使用 view 查看详细配置
[root@master ~]# kubectl config view
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: DATA+OMITTED
server: https://192.168.1.50:6443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: kubernetes-admin
name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
user:
client-certificate-data: REDACTED
client-key-data: REDACTED
主机授权管理
[root@harbor ~]# vim /etc/hosts
192.168.1.30 harbor
192.168.1.50 master
192.168.1.51 node-0001
192.168.1.52 node-0002
192.168.1.53 node-0003
192.168.1.54 node-0004
192.168.1.55 node-0005
[root@harbor ~]# dnf install -y kubectl
[root@harbor ~]# mkdir -p $HOME/.kube
[root@harbor ~]# rsync -av master:/etc/kubernetes/admin.conf $HOME/.kube/config
[root@harbor ~]# chown $(id -u):$(id -g) $HOME/.kube/config
[root@harbor ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master Ready control-plane 24h v1.26.0
node-0001 Ready <none> 22h v1.26.0
node-0002 Ready <none> 22h v1.26.0
node-0003 Ready <none> 22h v1.26.0
node-0004 Ready <none> 22h v1.26.0
node-0005 Ready <none> 22h v1.26.0
资源对象
一个应用则为一个资源对象
Pod管理命令
创建pod
创建Pod
# 创建 Pod
[root@master ~]# kubectl run myweb --image=myos:nginx
pod/myweb created
# 查询资源对象
[root@master ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE
myweb 1/1 Running 0 3s 10.244.1.3 node-0001
[root@master ~]# curl http://10.244.1.3
Nginx is running !
kubectl get pod -o wide
get
# 查看 Pod 资源对象
[root@master ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
myweb 1/1 Running 0 10m
# 只查看资源对象的名字
[root@master ~]# kubectl get pods -o name
pod/myweb
# 查看资源对象运行节点的信息
[root@master ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE
myweb 1/1 Running 0 10m 10.244.1.3 node-0001
# 查看资源对象详细信息,Yaml 格式
[root@master ~]# kubectl get pod myweb -o yaml
apiVersion: v1
kind: Pod
metadata:
name: myweb
... ...
# 查看名称空间
[root@master ~]# kubectl get namespaces
NAME STATUS AGE
default Active 39h
kube-node-lease Active 39h
kube-public Active 39h
kube-system Active 39h
# 查看 kube-system 名称空间中的 Pod 信息
[root@master ~]# kubectl -n kube-system get pods
NAME READY STATUS RESTARTS AGE
etcd-master 1/1 Running 0 39h
kube-apiserver-master 1/1 Running 0 39h
kube-controller-manager-master 1/1 Running 0 39h
kube-scheduler-master 1/1 Running 0 39h
... ...
create
# 创建名称空间资源对象
[root@master ~]# kubectl create namespace work
namespace/work created
# 查看名称空间
[root@master ~]# kubectl get namespaces
NAME STATUS AGE
default Active 39h
kube-node-lease Active 39h
kube-public Active 39h
kube-system Active 39h
work Active 11s
run
# 创建简单 Pod 资源对象
[root@master ~]# kubectl -n work run myhttp --image=myos:httpd
pod/myhttp created
# 查询资源对象
[root@master ~]# kubectl -n work get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE
myhttp 1/1 Running 0 3s 10.244.2.2 node-0002
# 访问验证
[root@master ~]# curl http://10.244.2.2
Welcome to The Apache.
describe
# 查看资源对象的属性信息
[root@master ~]# kubectl describe pod myweb
Name: myweb
Namespace: default
Priority: 0
Service Account: default
Node: node-0001/192.168.1.51
... ...
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 16m default-scheduler Successfully assigned default/myweb to node-0001
Normal Pulled 16m kubelet Container image "myos:nginx" already present on machine
Normal Created 16m kubelet Created container myweb
Normal Started 16m kubelet Started container myweb
# 查看 work 名称空间下的 pod 信息
[root@master ~]# kubectl -n work describe pod myhttp
Name: myhttp
Namespace: work
Priority: 0
Service Account: default
Node: node-0002/192.168.1.52
... ...
logs
# 访问服务,并查看日志
[root@master ~]# curl http://10.244.1.3/info.php
[root@master ~]# curl http://10.244.2.2/info.php
... ...
# 查看 myweb 日志
[root@master ~]# kubectl logs myweb
2022/11/12 18:28:54 [error] 7#0: *2 open() "/usr/local/nginx/html/info.php" failed (2: No such file or directory), client: 10.244.0.0, server: localhost, request: "GET /info.php HTTP/1.1", host: "10.244.2.12"
# 查看 myhttp 日志
[root@master ~]# kubectl -n work logs myhttp
[root@master ~]#
Pod 管理命令(二)
子命令 说明 备注
exec 在某一个容器内执行特定的命令 可选参数: -c 容器名称
cp 在容器和宿主机之间拷贝文件或目录 可选参数: -c 容器名称
delete 删除资源对象 可选参数: -l 标签
exec
# 在容器内执行命令
[root@master ~]# kubectl exec myweb -- ls
50x.html
index.html
[root@master ~]# kubectl exec -it myweb -- /bin/bash
[root@myweb html]# ifconfig eth0
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1450
inet 10.244.1.3 netmask 255.255.255.0 broadcast 10.244.1.255
ether 3a:32:78:59:ed:25 txqueuelen 0 (Ethernet)
... ...
cp
# 与容器进行文件或目录传输
[root@master ~]# kubectl cp myweb:/etc/yum.repos.d /root/aaa
tar: Removing leading `/' from member names
[root@master ~]# tree /root/aaa
/root/aaa
├── local.repo
├── Rocky-AppStream.repo
├── Rocky-BaseOS.repo
└── Rocky-Extras.repo
0 directories, 4 files
[root@master ~]# kubectl -n work cp /etc/passwd myhttp:/root/mima
[root@master ~]# kubectl -n work exec myhttp -- ls /root/
mima
delete
# 删除资源对象
[root@master ~]# kubectl delete pods myweb
pod "myweb" deleted
# 删除 work 名称空间下所有 Pod 对象
[root@master ~]# kubectl -n work delete pods --all
pod "myhttp" deleted
# 删除名称空间
[root@master ~]# kubectl delete namespaces work
namespace "work" deleted
资源对象文件
Pod 资源对象文件
[root@master ~]# vim myweb.yaml
---
kind: Pod
apiVersion: v1
metadata:
name: myweb
spec:
containers:
- name: webserver
image: myos:nginx
资源文件管理命令
子命令 说明 备注
create 创建文件中定义的资源 支持指令式和资源对象文件配置
apply 创建(更新)文件中定义的资源 只支持资源对象文件(声明式)
delete 删除文件中定义的资源 支持指令式和资源对象文件配置
create
# 创建资源对象
[root@master ~]# kubectl create -f myweb.yaml
pod/myweb created
# 不能更新,重复执行会报错
[root@master ~]# kubectl create -f myweb.yaml
Error from server (AlreadyExists): error when creating "myweb.yaml": pods "myweb" already exists
delete
# 使用资源对象文件删除
[root@master ~]# kubectl delete -f myhttp.yaml
pod "myhttp" deleted
[root@master ~]# kubectl get pods
No resources found in default namespace.
apply
# 创建资源对象
[root@master ~]# kubectl apply -f myweb.yaml
pod/myweb created
# 更新资源对象
[root@master ~]# kubectl apply -f myweb.yaml
pod/myweb configured
# 删除资源对象
[root@master ~]# kubectl delete -f myweb.yaml
pod "myweb" deleted
# 拓展提高
# 与 kubectl apply -f myweb.yaml 功能相同
[root@master ~]# cat myweb.yaml |kubectl apply -f -
kubectl -n work describe pod myhttp
名称空间
除了 Pod 之外的资源对象(如 namespace)一般用 create 创建;简单的 Pod 可以直接用 run 创建
排错三兄弟
kubectl -n work get pods -o wide 查看资源对象
kubectl -n work describe pod myhttp 查看容器外面的报错 pod的报错
kubectl logs myweb 查看容器的报错 kubectl -n work logs myhttp
删除名称空间,需要先删除名称内所有运行的容器
跨容器拷贝文件