I. Environment Overview
1. Machines
Hostname | IP Address | OS Version | Specs |
---|---|---|---|
k8s-master-1 | 192.168.186.148 | CentOS Linux release 7.9.2009 (Core) | 2 CPU / 8GB / 30GB |
k8s-master-2 | 192.168.186.149 | CentOS Linux release 7.9.2009 (Core) | 2 CPU / 4GB / 20GB |
k8s-master-3 | 192.168.186.152 | CentOS Linux release 7.9.2009 (Core) | 2 CPU / 4GB / 20GB |
k8s-node-1 | 192.168.186.150 | CentOS Linux release 7.9.2009 (Core) | 2 CPU / 4GB / 20GB |
k8s-node-2 | 192.168.186.151 | CentOS Linux release 7.9.2009 (Core) | 2 CPU / 4GB / 20GB |
ansible | 192.168.186.153 | CentOS Linux release 7.9.2009 (Core) | 2 CPU / 8GB / 20GB |
nfs | 192.168.186.154 | CentOS Linux release 7.9.2009 (Core) | 2 CPU / 4GB / 20GB |
2. Software
CentOS 7.9, Kubernetes v1.28.2, Docker 26.1.4, Ansible 2.9.27, Prometheus 2.54.1, Grafana 9.1.2, GitLab 17.4.0-jh, Harbor 2.10.3, Jenkins
II. Environment Initialization
1. Install CentOS
A standard installation is all that is needed.
2. Set Hostnames
On k8s-master-1:
hostnamectl set-hostname k8s-master-1 && bash
On k8s-master-2:
hostnamectl set-hostname k8s-master-2 && bash
On k8s-node-1:
hostnamectl set-hostname k8s-node-1 && bash
On k8s-node-2:
hostnamectl set-hostname k8s-node-2 && bash
3. Configure a Static IP
Note: $2 and $3 below are the IP-address and gateway arguments consumed by the one-click init script in step 13; substitute real values if you run this snippet by hand.
cat >/etc/sysconfig/network-scripts/ifcfg-ens33 <<EOF
BOOTPROTO="static"
NAME="ens33"
DEVICE="ens33"
ONBOOT="yes"
IPADDR=$2
NETMASK=255.255.255.0
GATEWAY=$3
DNS1=114.114.114.114
EOF
4. Configure /etc/hosts
cat << EOF | sudo tee -a /etc/hosts
192.168.186.148 k8s-master-1
192.168.186.149 k8s-master-2
192.168.186.150 k8s-node-1
192.168.186.151 k8s-node-2
EOF
5. Disable SELinux
# Disable temporarily
setenforce 0
# Disable permanently
sed -i '/SELINUX=/ s/enforcing/disabled/' /etc/selinux/config
# The change to /etc/selinux/config only takes effect after a reboot
reboot
# Verify
getenforce
6. Disable firewalld and Flush iptables
# CentOS
systemctl stop firewalld && systemctl disable firewalld
# Ubuntu
ufw disable
# Flush iptables
iptables -F
iptables -X
7. Disable Swap
# Disable temporarily
swapoff -a
# Disable permanently
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
8. Tune Kernel Parameters
# Enable bridge filtering and IPv4 forwarding so iptables can see bridged traffic
cat <<EOF | tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
# Load the overlay and br_netfilter modules
modprobe overlay
modprobe br_netfilter
# Write the following settings to /etc/sysctl.d/k8s.conf:
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
# Apply the sysctl settings without rebooting (sysctl -p only reads /etc/sysctl.conf, so use --system to pick up /etc/sysctl.d/k8s.conf)
sysctl --system
# Verify that the br_netfilter and overlay modules loaded
lsmod | grep -e br_netfilter -e overlay
9. Update Package Repositories
cd /etc/yum.repos.d
curl -O http://mirrors.aliyun.com/repo/Centos-7.repo
# Move the stock CentOS repo files aside; the official CentOS 7 mirrors are no longer served
mkdir backup
mv CentOS-* backup
# Refresh and configure the package sources
yum clean all && yum makecache
yum install -y yum-utils
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Install commonly used tools
yum install vim net-tools tree psmisc epel* -y
10. Configure IPVS
# Install ipset and ipvsadm
yum install ipset ipvsadm -y
# Write the modules to load into a script file
# modprobe loads a module into the running kernel
cat <<EOF > /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
# Make the script executable
chmod +x /etc/sysconfig/modules/ipvs.modules
# Run the script
/bin/bash /etc/sysconfig/modules/ipvs.modules
# Verify the modules loaded
lsmod | grep -e ip_vs -e nf_conntrack_ipv4
11. Configure Time Synchronization
systemctl start chronyd && systemctl enable chronyd
12. Reboot
reboot
13. One-Click Init Script
[root@k8s-master-1 ~]# cat init.sh
#!/bin/bash
# Step 1: download Aliyun's Centos-7.repo
cd /etc/yum.repos.d
curl -O http://mirrors.aliyun.com/repo/Centos-7.repo
# Move the stock CentOS repo files aside; the official CentOS 7 mirrors are no longer served
mkdir backup
mv CentOS-* backup
# Refresh and configure the package sources
yum clean all && yum makecache
yum install -y yum-utils
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
# Install commonly used tools
yum install vim net-tools tree psmisc epel* -y
# Step 2: set the hostname
hostnamectl set-hostname $1
# Step 3: configure a static IP
cat >/etc/sysconfig/network-scripts/ifcfg-ens33 <<EOF
BOOTPROTO="static"
NAME="ens33"
DEVICE="ens33"
ONBOOT="yes"
IPADDR=$2
NETMASK=255.255.255.0
GATEWAY=$3
DNS1=114.114.114.114
EOF
# Step 4: disable SELinux and firewalld
systemctl stop firewalld
systemctl disable firewalld
sed -i '/SELINUX=/ s/enforcing/disabled/' /etc/selinux/config
# Step 5: flush iptables
iptables -F
iptables -X
# Step 6: disable swap
# Disable temporarily
swapoff -a
# Disable permanently
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
# Step 7: tune kernel parameters
# Enable bridge filtering and IPv4 forwarding so iptables can see bridged traffic
cat <<EOF | tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF
# Load the overlay and br_netfilter modules
modprobe overlay
modprobe br_netfilter
# Write the following settings to /etc/sysctl.d/k8s.conf:
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
# Apply the sysctl settings without rebooting
sysctl --system
# Verify that the br_netfilter and overlay modules loaded
lsmod | grep -e br_netfilter -e overlay
# Step 8: configure /etc/hosts
cat << EOF | sudo tee -a /etc/hosts
192.168.186.148 k8s-master-1
192.168.186.149 k8s-master-2
192.168.186.150 k8s-node-1
192.168.186.151 k8s-node-2
EOF
# Step 9: configure IPVS
# Install ipset and ipvsadm
yum install ipset ipvsadm -y
# Write the modules to load into a script file
# modprobe loads a module into the running kernel
cat <<EOF > /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
# Make the script executable
chmod +x /etc/sysconfig/modules/ipvs.modules
# Run the script
/bin/bash /etc/sysconfig/modules/ipvs.modules
# Verify the modules loaded
lsmod | grep -e ip_vs -e nf_conntrack_ipv4
# Step 10: configure time synchronization
systemctl start chronyd && systemctl enable chronyd
# Reboot the server
reboot
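The script takes three positional arguments: $1 = hostname, $2 = IP address, $3 = gateway. For example, to initialize the first master (values from the table in section I; the .2 gateway assumes the same NAT gateway used for the ansible host later in these notes):
bash init.sh k8s-master-1 192.168.186.148 192.168.186.2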
III. Docker Setup
1. Remove Any Existing Docker Packages
sudo yum remove docker \
docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-engine
2. Install Docker
sudo yum install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
3. Enable the Docker Service
# Start docker
systemctl start docker
# Enable docker at boot
systemctl enable docker
# Verify
systemctl status docker
4. Configure Docker Registry Mirrors
sudo tee /etc/docker/daemon.json <<-'EOF'
{
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors": [
"https://hub.dftianyi.top",
"https://docker.mirrors.ustc.edu.cn",
"http://hub-mirror.c.163.com",
"https://docker.mirrors.sjtug.sjtu.edu.cn",
"https://docker.m.daocloud.io",
"https://dockertest.jsdelivr.fyi"
]
}
EOF
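For the new daemon.json to take effect, restart Docker (note this briefly bounces running containers):
# Restart Docker and confirm the mirrors registered
sudo systemctl restart docker
docker info | grep -A 6 'Registry Mirrors'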
5. Configure cri-dockerd
Kubernetes 1.24 removed the built-in dockershim, so a CRI shim such as cri-dockerd is needed for kubelet to keep talking to Docker Engine.
# Download
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.8/cri-dockerd-0.3.8-3.el7.x86_64.rpm
# Install
rpm -ivh cri-dockerd-0.3.8-3.el7.x86_64.rpm
# Reload systemd units
systemctl daemon-reload
# Edit the unit file
vim /usr/lib/systemd/system/cri-docker.service
# Change the ExecStart line (around line 10) to:
# ExecStart=/usr/bin/cri-dockerd --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9 --container-runtime-endpoint fd://
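If you prefer to script the change, a sed one-liner along these lines should work (a sketch, assuming the stock unit file shipped in the RPM):
sed -i 's#^ExecStart=.*#ExecStart=/usr/bin/cri-dockerd --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.9 --container-runtime-endpoint fd://#' /usr/lib/systemd/system/cri-docker.service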
6. Enable the cri-docker Service
# Reload systemd units
systemctl daemon-reload
# Start cri-dockerd
systemctl start cri-docker.socket cri-docker
# Enable cri-dockerd at boot
systemctl enable cri-docker.socket cri-docker
# Check the Docker component status
systemctl status docker cri-docker.socket cri-docker
# Optional smoke test
docker run hello-world
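Once the Kubernetes packages from the next section are installed (crictl arrives as part of cri-tools, a kubeadm dependency), the CRI socket itself can be smoke-tested:
crictl --runtime-endpoint unix:///var/run/cri-dockerd.sock version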
IV. Kubernetes Cluster Setup
1. Configure the Kubernetes Package Repo
cat <<EOF | tee /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Refresh the package index cache
yum makecache
2. Install kubeadm, kubelet, and kubectl
- kubeadm bootstraps the control-plane components (apiserver, etcd, scheduler, etc.)
- kubelet manages the container runtime on each node (here, Docker via cri-dockerd)
- kubectl is the command-line client for the cluster
# Install
yum install -y kubeadm-1.28.2-0 kubelet-1.28.2-0 kubectl-1.28.2-0 --disableexcludes=kubernetes
# Enable kubelet now and at boot
systemctl enable --now kubelet
At this point it is normal for kubelet to fail to start; it will come up once the cluster is initialized.
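To watch it while troubleshooting:
journalctl -u kubelet -f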
V. Cluster Initialization
1. kubeadm init
Run on the master node only (k8s-master-1 here):
kubeadm init --kubernetes-version=v1.28.2 \
--pod-network-cidr=10.224.0.0/16 \
--apiserver-advertise-address=192.168.186.148 \
--image-repository=registry.aliyuncs.com/google_containers \
--cri-socket=unix:///var/run/cri-dockerd.sock
Replace apiserver-advertise-address with the master node's IP.
On success you will see output like:
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 172.18.212.47:6443 --token 3uc69h.n4oc8uepjjpd0bw4 \
--discovery-token-ca-cert-hash sha256:c31136d1ba9c9354821b669a5ab72c46ec0bcd9dc3f6677a916a04fcaa7ae515
Record the kubeadm join command from the output and append --cri-socket unix:///var/run/cri-dockerd.sock to it.
The full command should look like: kubeadm join 172.18.212.47:6443 --token xxx --discovery-token-ca-cert-hash sha256:xxx --cri-socket unix:///var/run/cri-dockerd.sock
Then set up kubectl access:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# If the token expires or the join command is lost, regenerate it with:
kubeadm token create --print-join-command
2. Join the Worker Nodes
Run the following on each worker node:
kubeadm join 192.168.186.148:6443 --token 4knj1w.r5jpljsuh6zngz33 --discovery-token-ca-cert-hash sha256:50969507f52be565ba31e083257196e233036ea93d3d993a5b1185f375922f2a --cri-socket unix:///var/run/cri-dockerd.sock
--cri-socket unix:///var/run/cri-dockerd.sock is the crucial flag here.
Assign the worker role labels:
kubectl label node k8s-node-1 node-role.kubernetes.io/worker=worker
kubectl label node k8s-node-2 node-role.kubernetes.io/worker=worker
3. Join Another Master
Simply copying master-1's certificates to master-2 and joining directly will fail; the steps below work around it.
3.1 Join master-2 as a worker first, then delete it from master-1
# Run on k8s-master-2
kubeadm join 192.168.186.148:6443 --token 22nb94.ub540p20f0x47t3d \
--discovery-token-ca-cert-hash sha256:33ba863876a904bd93c73f5013917647a0cfcdb446870f145ac5b6dcc20eadae --cri-socket unix:///var/run/cri-dockerd.sock
# Run on k8s-master-1
kubectl delete nodes k8s-master-2
3.2 On master-1, obtain the certificate key
[root@k8s-master-1 ~]# kubeadm init phase upload-certs --upload-certs
3.3 Join from master-2 again, then remove the files and process the preflight errors complain about
# Join the cluster from master-2 once more
[root@k8s-master-2 ~]# kubeadm join 192.168.186.148:6443 --token 22nb94.ub540p20f0x47t3d --discovery-token-ca-cert-hash sha256:33ba863876a904bd93c73f5013917647a0cfcdb446870f145ac5b6dcc20eadae --cri-socket unix:///var/run/cri-dockerd.sock
[preflight] Running pre-flight checks
error execution phase preflight: [preflight] Some fatal errors occurred:
[ERROR FileAvailable--etc-kubernetes-kubelet.conf]: /etc/kubernetes/kubelet.conf already exists
[ERROR Port-10250]: Port 10250 is in use
[ERROR FileAvailable--etc-kubernetes-pki-ca.crt]: /etc/kubernetes/pki/ca.crt already exists
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher
# Fix the errors
[root@k8s-master-2 ~]# rm -rf /etc/kubernetes/kubelet.conf
[root@k8s-master-2 ~]# rm -rf /etc/kubernetes/pki/ca.crt
[root@k8s-master-2 ~]# netstat -anplut|grep 10250
tcp6 0 0 :::10250 :::* LISTEN 2100/kubelet
[root@k8s-master-2 ~]# kill -9 2100
3.4 Join master-2 as a control-plane node
[root@k8s-master-2 ~]# kubeadm join 192.168.186.148:6443 --token 22nb94.ub540p20f0x47t3d --discovery-token-ca-cert-hash sha256:33ba863876a904bd93c73f5013917647a0cfcdb446870f145ac5b6dcc20eadae --cri-socket unix:///var/run/cri-dockerd.sock --control-plane --certificate-key a725e482c9293d7bebfb473645d977ceedf97adb05c571bd52980b9f4b87ea38
--control-plane --certificate-key a725e482c9293d7bebfb473645d977ceedf97adb05c571bd52980b9f4b87ea38 uses the certificate key obtained in step 3.2.
3.5 Verify
[root@k8s-master-1 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master-1 NotReady control-plane 10m v1.28.2
k8s-master-2 NotReady control-plane 68s v1.28.2
k8s-node-1 NotReady worker 17s v1.28.2
k8s-node-2 NotReady worker 19s v1.28.2
4. Install the Calico Network Plugin
The following runs on the master and takes a while to settle:
wget https://docs.projectcalico.org/manifests/calico.yaml
kubectl apply -f calico.yaml
# Verify
kubectl get nodes
# NAME STATUS ROLES AGE VERSION
# k8s-master-1 Ready control-plane 2h v1.28.2
# k8s-master-2 Ready control-plane 2h v1.28.2
# k8s-node-1 Ready worker 1h v1.28.2
# k8s-node-2 Ready worker 1h v1.28.2
5. Switch kube-proxy to IPVS Mode
kubectl edit configmap kube-proxy -n kube-system
# Change the mode field to
mode: "ipvs"
# Delete all kube-proxy pods so they restart with the new mode
kubectl delete pods -n kube-system -l k8s-app=kube-proxy
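To confirm the new mode took effect, one quick check (ipvsadm was installed during environment initialization):
# List the IPVS virtual servers on any node
ipvsadm -Ln
# Or look for "Using ipvs Proxier" in the kube-proxy logs
kubectl logs -n kube-system -l k8s-app=kube-proxy | grep -i ipvs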
VI. Install the Dashboard
1. Download and Install
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
In the Service section, change the type to NodePort to expose it externally:
kind: Service
apiVersion: v1
metadata:
...
spec:
  type: NodePort # changed to NodePort
Install:
kubectl apply -f recommended.yaml
2. Check
kubectl get pods,svc -n kubernetes-dashboard
NAME READY STATUS RESTARTS AGE
pod/dashboard-metrics-scraper-5657497c4c-x8srr 1/1 Running 0 3d22h
pod/kubernetes-dashboard-78f87ddfc-2b6rq 1/1 Running 0 3d22h
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/dashboard-metrics-scraper ClusterIP 10.109.73.125 <none> 8000/TCP 3d22h
service/kubernetes-dashboard NodePort 10.101.254.225 <none> 443:30088/TCP 3d22h
3. Create an Admin User
Create dashboard-access-token.yaml:
# Creating a Service Account
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kubernetes-dashboard
---
# Creating a ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kubernetes-dashboard
---
# Getting a long-lived Bearer Token for ServiceAccount
apiVersion: v1
kind: Secret
metadata:
name: admin-user
namespace: kubernetes-dashboard
annotations:
kubernetes.io/service-account.name: "admin-user"
type: kubernetes.io/service-account-token
# Clean up and next steps
# kubectl -n kubernetes-dashboard delete serviceaccount admin-user
# kubectl -n kubernetes-dashboard delete clusterrolebinding admin-user
Apply it:
kubectl apply -f dashboard-access-token.yaml
# Retrieve the token
kubectl get secret admin-user -n kubernetes-dashboard -o jsonpath="{.data.token}" | base64 -d
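On Kubernetes 1.24+ you can also mint a short-lived token on demand, without the long-lived Secret above:
kubectl -n kubernetes-dashboard create token admin-user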
4. Access the Dashboard
# Find the NodePort
kubectl get svc -n kubernetes-dashboard
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
dashboard-metrics-scraper ClusterIP 10.109.73.125 <none> 8000/TCP 3d23h
kubernetes-dashboard NodePort 10.101.254.225 <none> 443:30088/TCP 3d23h # the NodePort is 30088
Browse to https://<node-IP>:<NodePort>, e.g. https://192.168.186.148:30088.
Note it must be https.
Enter the token from the previous step on the login page.
VII. kubectl Command Completion
yum install -y bash-completion
# Enable completion for the current shell
source <(kubectl completion bash)
# Enable completion permanently
echo "source <(kubectl completion bash)" >> ~/.bashrc && bash
VIII. Deploy metrics-server
1. Download
Run on the master:
wget https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.6.2/components.yaml
2. Edit
Edit components.yaml around line 140.
Before:
containers:
- args:
...
image: k8s.gcr.io/metrics-server/metrics-server:v0.6.2
After:
containers:
- args:
...
- --kubelet-insecure-tls # add this line
image: registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server:v0.6.2 # switch to a reachable mirror
3. Apply
kubectl apply -f components.yaml
4. Check
[root@k8s-master ~]# kubectl top nodes
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
k8s-master 276m 6% 3445Mi 44%
k8s-node-1 202m 0% 11326Mi 17%
k8s-node-2 204m 0% 14497Mi 22%
[root@k8s-master kubernetes]# kubectl top pods
NAME CPU(cores) MEMORY(bytes)
nginx-test-d5db944-6wqkw 0m 14Mi
rpa-llm-embedding-service-9477877dc-fzg88 1m 492Mi
IX. Deploy Ansible
1. Create the VM and Run the Init Script
[root@k8s-master-1 ~]# scp init.sh root@192.168.186.153:/root
[root@ansible ~]# bash init.sh ansible 192.168.186.153 192.168.186.2
2. Install Ansible
On the control node:
[root@ansible .ssh]# yum install epel-release -y
[root@ansible .ssh]# yum install ansible -y
3. Generate a Key Pair and Copy the Public Key to the Cluster Hosts
[root@ansible .ssh]# ssh-keygen
[root@ansible .ssh]# cd /root/.ssh
[root@ansible .ssh]# ssh-copy-id -i id_rsa.pub root@192.168.186.148
[root@ansible .ssh]# ssh-copy-id -i id_rsa.pub root@192.168.186.149
[root@ansible .ssh]# ssh-copy-id -i id_rsa.pub root@192.168.186.150
[root@ansible .ssh]# ssh-copy-id -i id_rsa.pub root@192.168.186.151
4. Test Passwordless SSH
[root@ansible .ssh]# ssh 'root@192.168.186.148'
Last login: Fri Sep 20 14:31:28 2024 from 192.168.186.1
5. Add the Managed Hosts to the Inventory and Test
Add the following to /etc/ansible/hosts:
[master1]
192.168.186.148
[master2]
192.168.186.149
[node1]
192.168.186.150
[node2]
192.168.186.151
Test:
[root@ansible .ssh]# ansible all -m shell -a "ip add"
X. Deploy Prometheus
1. Verify the Docker Registry Mirrors
[root@k8s-master-1 prometheus监控k8s]# cat /etc/docker/daemon.json
{
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors": [
"https://hub.dftianyi.top",
"https://docker.mirrors.ustc.edu.cn",
"http://hub-mirror.c.163.com",
"https://docker.mirrors.sjtug.sjtu.edu.cn",
"https://docker.m.daocloud.io",
"https://dockertest.jsdelivr.fyi"
]
}
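If daemon.json was changed on a node, restart Docker there for the mirror list to take effect (this briefly bounces running containers):
systemctl restart docker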
2. Pre-Pull the Images
Pull the images in advance so the pods don't take too long to create:
docker pull prom/node-exporter
docker pull prom/prometheus
docker pull grafana/grafana:9.1.2
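The pulls need to happen on every node that might schedule these pods; with the Ansible inventory from section IX in place, one way is:
ansible all -m shell -a 'docker pull prom/node-exporter && docker pull prom/prometheus && docker pull grafana/grafana:9.1.2'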
3. Deploy node-exporter
Apply the following manifest:
[root@k8s-master-1 prometheus监控k8s]# cat node-exporter.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: node-exporter
namespace: kube-system
labels:
k8s-app: node-exporter
spec:
selector:
matchLabels:
k8s-app: node-exporter
template:
metadata:
labels:
k8s-app: node-exporter
spec:
containers:
- image: prom/node-exporter
name: node-exporter
ports:
- containerPort: 9100
protocol: TCP
name: http
---
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: node-exporter
name: node-exporter
namespace: kube-system
spec:
ports:
- name: http
port: 9100
nodePort: 31672
protocol: TCP
type: NodePort
selector:
k8s-app: node-exporter
Apply:
kubectl apply -f node-exporter.yaml
Check:
kubectl get pods -n kube-system -o wide
4. Install Prometheus
(1) Grant RBAC to the service account
Apply the following manifest:
[root@k8s-master-1 prometheus监控k8s]# cat rbac-setup.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: prometheus
rules:
- apiGroups: [""]
resources:
- nodes
- nodes/proxy
- services
- endpoints
- pods
verbs: ["get", "list", "watch"]
- apiGroups:
- extensions
resources:
- ingresses
verbs: ["get", "list", "watch"]
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: prometheus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: prometheus
subjects:
- kind: ServiceAccount
name: prometheus
namespace: kube-system
(2) Create a ConfigMap to hold the Prometheus configuration
[root@k8s-master-1 prometheus监控k8s]# cat configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: prometheus-config
namespace: kube-system
data:
prometheus.yml: |
global:
scrape_interval: 15s
evaluation_interval: 15s
scrape_configs:
- job_name: 'kubernetes-apiservers'
kubernetes_sd_configs:
- role: endpoints
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
relabel_configs:
- source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
action: keep
regex: default;kubernetes;https
- job_name: 'kubernetes-nodes'
kubernetes_sd_configs:
- role: node
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels: [__meta_kubernetes_node_name]
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics
- job_name: 'kubernetes-cadvisor'
kubernetes_sd_configs:
- role: node
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels: [__meta_kubernetes_node_name]
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
- job_name: 'kubernetes-service-endpoints'
kubernetes_sd_configs:
- role: endpoints
relabel_configs:
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
action: keep
regex: true
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
action: replace
target_label: __scheme__
regex: (https?)
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
action: replace
target_label: __metrics_path__
regex: (.+)
- source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
action: replace
target_label: __address__
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:$2
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_service_name]
action: replace
target_label: kubernetes_name
- job_name: 'kubernetes-services'
kubernetes_sd_configs:
- role: service
metrics_path: /probe
params:
module: [http_2xx]
relabel_configs:
- source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
action: keep
regex: true
- source_labels: [__address__]
target_label: __param_target
- target_label: __address__
replacement: blackbox-exporter.example.com:9115
- source_labels: [__param_target]
target_label: instance
- action: labelmap
regex: __meta_kubernetes_service_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_service_name]
target_label: kubernetes_name
- job_name: 'kubernetes-ingresses'
kubernetes_sd_configs:
- role: ingress
relabel_configs:
- source_labels: [__meta_kubernetes_ingress_annotation_prometheus_io_probe]
action: keep
regex: true
- source_labels: [__meta_kubernetes_ingress_scheme,__address__,__meta_kubernetes_ingress_path]
regex: (.+);(.+);(.+)
replacement: ${1}://${2}${3}
target_label: __param_target
- target_label: __address__
replacement: blackbox-exporter.example.com:9115
- source_labels: [__param_target]
target_label: instance
- action: labelmap
regex: __meta_kubernetes_ingress_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_ingress_name]
target_label: kubernetes_name
- job_name: 'kubernetes-pods'
kubernetes_sd_configs:
- role: pod
relabel_configs:
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
action: keep
regex: true
- source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
action: replace
target_label: __metrics_path__
regex: (.+)
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:$2
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_pod_name]
action: replace
target_label: kubernetes_pod_name
(3) Deploy Prometheus via a Deployment
[root@k8s-master-1 prometheus监控k8s]# cat prometheus.deploy.yml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
name: prometheus-deployment
name: prometheus
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
app: prometheus
template:
metadata:
labels:
app: prometheus
spec:
containers:
- image: prom/prometheus
name: prometheus
command:
- "/bin/prometheus"
args:
- "--config.file=/etc/prometheus/prometheus.yml"
- "--storage.tsdb.path=/prometheus"
- "--storage.tsdb.retention=24h"
ports:
- containerPort: 9090
protocol: TCP
volumeMounts:
- mountPath: "/prometheus"
name: data
- mountPath: "/etc/prometheus"
name: config-volume
resources:
requests:
cpu: 100m
memory: 100Mi
limits:
cpu: 500m
memory: 2500Mi
serviceAccountName: prometheus
volumes:
- name: data
emptyDir: {}
- name: config-volume
configMap:
name: prometheus-config
(4) Create a Service for the Prometheus pod
[root@k8s-master-1 prometheus监控k8s]# cat prometheus.svc.yml
kind: Service
apiVersion: v1
metadata:
labels:
app: prometheus
name: prometheus
namespace: kube-system
spec:
type: NodePort
ports:
- port: 9090
targetPort: 9090
nodePort: 30003
  selector:
    app: prometheus
5. Install Grafana
(1) Deploy Grafana via a Deployment
[root@k8s-master-1 prometheus监控k8s]# cat grafana-deploy.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: grafana-core
namespace: kube-system
labels:
app: grafana
component: core
spec:
replicas: 1
selector:
matchLabels:
app: grafana
template:
metadata:
labels:
app: grafana
component: core
spec:
containers:
- image: grafana/grafana:9.1.2
name: grafana-core
imagePullPolicy: IfNotPresent
# env:
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
env:
# The following env variables set up basic auth with the default admin user and admin password.
- name: GF_AUTH_BASIC_ENABLED
value: "true"
- name: GF_AUTH_ANONYMOUS_ENABLED
value: "false"
# - name: GF_AUTH_ANONYMOUS_ORG_ROLE
# value: Admin
# does not really work, because of template variables in exported dashboards:
# - name: GF_DASHBOARDS_JSON_ENABLED
# value: "true"
readinessProbe:
httpGet:
path: /login
port: 3000
# initialDelaySeconds: 30
# timeoutSeconds: 1
#volumeMounts: # skip persistent storage for now
#- name: grafana-persistent-storage
# mountPath: /var
#volumes:
#- name: grafana-persistent-storage
#emptyDir: {}
(2) Create a Service for the Grafana pod
[root@k8s-master-1 prometheus监控k8s]# cat grafana-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: grafana
namespace: kube-system
labels:
app: grafana
component: core
spec:
type: NodePort
ports:
- port: 3000
selector:
app: grafana
component: core
(3) Configure an Ingress to route external traffic to Grafana
[root@k8s-master-1 prometheus监控k8s]# cat grafana-ing.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: grafana
namespace: kube-system
spec:
rules:
- host: k8s.grafana
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: grafana
port:
number: 3000
6. Verify
Check that the pods are up:
[root@k8s-master-1 prometheus监控k8s]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-658d97c59c-8tg72 1/1 Running 1 (43m ago) 2d21h
calico-node-27jlx 1/1 Running 11 (43m ago) 36d
calico-node-659dm 1/1 Running 8 (40m ago) 36d
calico-node-l4686 1/1 Running 8 (40m ago) 36d
calico-node-zgq5n 1/1 Running 8 (2d20h ago) 36d
coredns-66f779496c-p5mxs 1/1 Running 1 (2d20h ago) 2d21h
coredns-66f779496c-zfs7j 1/1 Running 4 (43m ago) 2d21h
etcd-k8s-master-1 1/1 Running 27 (43m ago) 36d
etcd-k8s-master-2 1/1 Running 26 (2d20h ago) 36d
grafana-core-5586ff8d6c-dhl8x 1/1 Running 0 22m
kube-apiserver-k8s-master-1 1/1 Running 51 (43m ago) 36d
kube-apiserver-k8s-master-2 1/1 Running 28 (53m ago) 36d
kube-controller-manager-k8s-master-1 1/1 Running 16 (43m ago) 36d
kube-controller-manager-k8s-master-2 1/1 Running 15 (43m ago) 36d
kube-proxy-4fsf6 1/1 Running 8 (40m ago) 36d
kube-proxy-4zk4g 1/1 Running 12 (43m ago) 36d
kube-proxy-dspc5 1/1 Running 8 (2d20h ago) 36d
kube-proxy-wtb9k 1/1 Running 8 (40m ago) 36d
kube-scheduler-k8s-master-1 1/1 Running 16 (43m ago) 36d
kube-scheduler-k8s-master-2 1/1 Running 14 (43m ago) 36d
metrics-server-f974958b-297t4 1/1 Running 1 (40m ago) 2d21h
node-exporter-lhmjc 1/1 Running 0 23m
node-exporter-q8mfz 1/1 Running 0 23m
prometheus-5f565476d4-xj6sk 1/1 Running 0 23m
Check that the services are up:
[root@k8s-master-1 prometheus监控k8s]# kubectl get svc -n kube-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
grafana NodePort 10.103.105.181 <none> 3000:30904/TCP 23m
kube-dns ClusterIP 10.96.0.10 <none> 53/UDP,53/TCP,9153/TCP 36d
metrics-server ClusterIP 10.106.101.186 <none> 443/TCP 36d
node-exporter NodePort 10.109.200.164 <none> 9100:31672/TCP 24m
prometheus NodePort 10.101.127.228 <none> 9090:30003/TCP 23m
Open 192.168.186.148:30003 in a browser for Prometheus.
Open 192.168.186.148:30904 in a browser for Grafana.
The default Grafana credentials are:
username: admin
password: admin
When setting up Grafana, import dashboard template 315.
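When adding the Prometheus data source in Grafana, the in-cluster service DNS name should work, since Grafana runs inside the cluster (assumes the kube-system services created above):
http://prometheus.kube-system.svc:9090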
Reference:
https://blog.csdn.net/Mo_nor/article/details/139669284
XI. Deploy the NFS Server
1. Set Up the NFS Server
Install nfs-utils on every node in the k8s cluster as well, since mounting NFS-backed volumes on a node requires it (see the Ansible one-liner after the command below).
yum install nfs-utils -y
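To install it on every cluster node in one shot, the Ansible control node from section IX can be used:
ansible all -m yum -a 'name=nfs-utils state=present'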
Start the nfs service and enable it at boot:
[root@nfs ~]# systemctl start nfs
[root@nfs ~]# systemctl enable nfs
2. Create the Shared Directory and an index.html
[root@nfs ~]# mkdir /web/data -p
[root@nfs ~]# cd /web/data
[root@nfs data]# vim index.html
[root@nfs data]# cat index.html
welcome to sanchuang !!
[root@nfs data]#
3. Export the Shared Directory /web/data
[root@nfs ~]# vim /etc/exports
/web/data 192.168.186.0/24(rw,sync,all_squash)
4. Re-Export the Shared Directories
[root@master web]# exportfs -a    # export all shares
[root@master web]# exportfs -v    # list the exported shares
/sc/web
192.168.2.0/24(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
[root@master web]# exportfs -r    # re-export all shares
[root@master web]# exportfs -v
/sc/web 192.168.2.0/24(sync,wdelay,hide,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash)
[root@master web]#
[root@nfs data]# service nfs restart    # restart the nfs service
Redirecting to /bin/systemctl restart nfs.service
[root@nfs data]# exportfs -rv
exporting 192.168.186.*:/web/data
[root@nfs data]#
Give /web/data mode 777 so the nfsnobody user can write to it:
[root@nfs data]# ll -d /web/data
drwxr-xr-x 2 root root 24 Jan 29 16:01 /web/data
[root@nfs data]# chmod 777 /web/data/
[root@nfs data]# ll -d /web/data
drwxrwxrwx 2 root root 24 Jan 29 16:01 /web/data
[root@nfs data]#
5. Test Mounting the NFS Share from Any Cluster Node
[root@node1 data]# mkdir /pv_pvc_nfs    # create the mount point
[root@node1 data]# mount 192.168.186.154:/web/data /pv_pvc_nfs/    # mount the share
[root@node1 data]# df -Th|grep nfs
192.168.186.154:/web/data nfs4 50G 17G 34G 34% /pv_pvc_nfs
If you now change the shared directory's contents on the NFS server, the mounted directory on the node reflects it.
6. Create the ServiceAccount and RBAC
[root@k8s-master-1 nfs]# cat nfs-rbac.yaml
apiVersion: v1
kind: Namespace
metadata:
name: dev
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
namespace: dev
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
namespace: dev
roleRef:
kind: ClusterRole
name: nfs-client-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
namespace: dev
rules:
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: leader-locking-nfs-client-provisioner
namespace: dev
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
namespace: dev
roleRef:
kind: Role
name: leader-locking-nfs-client-provisioner
apiGroup: rbac.authorization.k8s.io
[root@k8s-master-1 nfs]# kubectl create ns dev
[root@k8s-master-1 nfs]# kubectl apply -f nfs-rbac.yaml
namespace/dev unchanged
serviceaccount/nfs-client-provisioner unchanged
clusterrole.rbac.authorization.k8s.io/nfs-client-provisioner-runner unchanged
clusterrolebinding.rbac.authorization.k8s.io/run-nfs-client-provisioner unchanged
role.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner unchanged
rolebinding.rbac.authorization.k8s.io/leader-locking-nfs-client-provisioner unchanged
7. Deploy NFS-Subdir-External-Provisioner
The provisioner uses a separate export, /nfs/web; create that directory on the NFS server and export it in /etc/exports the same way as /web/data.
[root@k8s-master-1 nfs]# cat nfs-provisioner-deploy.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
name: nfs-client-provisioner
namespace: dev
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
    type: Recreate # delete-then-recreate on upgrade (the default is a rolling update)
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
      serviceAccountName: nfs-client-provisioner # the ServiceAccount created in the previous step
containers:
- name: nfs-client-provisioner
image: registry.cn-beijing.aliyuncs.com/mydlq/nfs-subdir-external-provisioner:v4.0.0
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
            - name: PROVISIONER_NAME # provisioner name; the StorageClass below must reference this exact value
              value: storage-nfs
            - name: NFS_SERVER # NFS server address; must match the volumes section below
              value: 192.168.186.154
            - name: NFS_PATH # NFS export path; must match the volumes section below
              value: /nfs/web
- name: ENABLE_LEADER_ELECTION
value: "true"
volumes:
- name: nfs-client-root
nfs:
          server: 192.168.186.154 # NFS server address
          path: /nfs/web # NFS export path
[root@k8s-master-1 nfs]# kubectl apply -f nfs-provisioner-deploy.yaml
deployment.apps/nfs-client-provisioner created
[root@k8s-master-1 nfs]# kubectl get pod -n dev -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nfs-client-provisioner-d55f7f849-8589f 1/1 Running 0 22s 10.224.140.177 k8s-node-2 <none> <none>
8. Create the NFS StorageClass
When creating a PVC we usually set storageClassName, which names a StorageClass resource; the PVC uses it to pick a StorageClass, whose associated provisioner then dynamically creates the PV. So we create a StorageClass up front.
[root@k8s-master-1 nfs]# vim nfs-storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
namespace: dev
name: nfs-storage
  annotations:
    storageclass.kubernetes.io/is-default-class: "false" ## whether to make this the default StorageClass
provisioner: storage-nfs ## must match the PROVISIONER_NAME env var in the Deployment above
parameters:
  archiveOnDelete: "true" ## "false" discards the data when the PVC is deleted; "true" archives it
mountOptions:
  - hard ## hard mount
  - nfsvers=4 ## NFS protocol version; set it to match the NFS server
[root@k8s-master-1 nfs]# kubectl apply -f nfs-storageclass.yaml
storageclass.storage.k8s.io/nfs-storage created
[root@k8s-master-1 nfs]# kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
nfs-storage storage-nfs Delete Immediate false 73s
9. Test a PVC Against the StorageClass
Create storage-pvc.yaml:
[root@k8s-master-1 nfs]# cat storage-pvc.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: storage-pvc
namespace: dev
spec:
  storageClassName: nfs-storage ## must match the name of the StorageClass created above
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Mi
[root@k8s-master-1 nfs]# kubectl apply -f storage-pvc.yaml
persistentvolumeclaim/storage-pvc created
[root@k8s-master-1 nfs]# kubectl get pvc -n dev
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
storage-pvc Bound pvc-bf7d70f8-610f-4516-b3c6-4f7091b269c4 100Mi RWO nfs-storage 5s
The PV was created automatically as well:
[root@k8s-master-1 nfs]# kubectl get pv -n dev
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
pvc-bf7d70f8-610f-4516-b3c6-4f7091b269c4 100Mi RWO Delete Bound dev/storage-pvc nfs-storage 41
The benefit of a StorageClass: PVCs can draw directly on its capacity, and the PVs are created automatically.
10. Create a Pod That Uses the PVC
The namespace must match the PVC's namespace.
[root@k8s-master-1 nfs]# cat pod.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment-nfs
namespace: dev
labels:
app: nginx-nfs
spec:
replicas: 1
selector:
matchLabels:
app: nginx-nfs
template:
metadata:
labels:
app: nginx-nfs
spec:
      # define the volume
volumes:
- name: pv-storage-nfs
persistentVolumeClaim:
claimName: storage-pvc
containers:
- name: nginx
image: nginx:latest
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
        # mount the volume inside the container
volumeMounts:
- mountPath: "/usr/share/nginx/html"
name: pv-storage-nfs
[root@k8s-master-1 nfs]# kubectl apply -f pod.yaml
deployment.apps/nginx-deployment-nfs created
[root@k8s-master-1 nfs]# kubectl get pod -n dev
NAME READY STATUS RESTARTS AGE
nfs-client-provisioner-d55f7f849-8589f 1/1 Running 0 19m
nginx-deployment-nfs-75585464b9-2n2gl 1/1 Running 0
Check the index file on the NFS server:
[root@nfs ~]# cd /nfs/web
[root@nfs web]# ls
dev-storage-pvc-pvc-bf7d70f8-610f-4516-b3c6-4f7091b269c4 index.html
[root@nfs web]# cat index.html
Hello from Kubernetes storage
Try fetching the home page from the pod that uses the StorageClass:
[root@k8s-master-1 data]# kubectl get pod -o wide -n dev
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nfs-client-provisioner-d55f7f849-8589f 1/1 Running 0 21m 10.224.140.177 k8s-node-2 <none> <none>
nginx-deployment-nfs-75585464b9-2n2gl 1/1 Running 0 2m10s 10.224.109.103 k8s-node-1 <none> <none>
No page is served; exec into the pod and look at the html directory:
[root@k8s-node-2 data]# curl 10.224.109.103
<html>
<head><title>403 Forbidden</title></head>
<body>
<center><h1>403 Forbidden</h1></center>
<hr><center>nginx/1.27.0</center>
</body>
</html>
[root@k8s-node-2 data]# kubectl exec -n dev -it nginx-deployment-nfs-75585464b9-2wlfv -- bash
root@nginx-deployment-nfs-75585464b9-2wlfv:/# cd /usr/share/nginx/html/
root@nginx-deployment-nfs-75585464b9-2wlfv:/usr/share/nginx/html# ls
root@nginx-deployment-nfs-75585464b9-2wlfv:/usr/share/nginx/html#
The fix:
The PVC automatically creates a folder on the NFS server behind the StorageClass, and that folder is what the pod actually mounts, so index.html has to live inside it:
dev-storage-pvc-pvc-bf7d70f8-610f-4516-b3c6-4f7091b269c4 --> the auto-created folder
[root@nfs web]# ls
dev-storage-pvc-pvc-bf7d70f8-610f-4516-b3c6-4f7091b269c4 index.html
Copy the index file into the PVC's folder:
[root@nfs web]# cp index.html dev-storage-pvc-pvc-bf7d70f8-610f-4516-b3c6-4f7091b269c4/
Check:
[root@nfs data]# cd dev-storage-pvc-pvc-bf7d70f8-610f-4516-b3c6-4f7091b269c4/
[root@nfs dev-storage-pvc-pvc-bf7d70f8-610f-4516-b3c6-4f7091b269c4]# ls
index.html
[root@nfs dev-storage-pvc-pvc-bf7d70f8-610f-4516-b3c6-4f7091b269c4]#
Fetch the pod again; the home page is served now:
[root@k8s-master-1 ~]# kubectl get pod -o wide -n dev
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nfs-client-provisioner-d55f7f849-8589f 1/1 Running 0 30m 10.224.140.177 k8s-node-2 <none> <none>
nginx-deployment-nfs-75585464b9-2n2gl 1/1 Running 0 10m 10.224.109.103 k8s-node-1 <none> <none>
[root@k8s-master-1 ~]# curl 10.224.109.103
Hello from Kubernetes storage
XII. HPA
1. Start a Deployment that runs a container from the hpa-example image, then expose it as a Service, with CPU requests and limits set:
[root@k8s-master-1 k8s_yaml]# cat hpa.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: php-apache
spec:
selector:
matchLabels:
run: php-apache
template:
metadata:
labels:
run: php-apache
spec:
containers:
- name: php-apache
image: k8s.gcr.io/hpa-example:latest
        imagePullPolicy: IfNotPresent # important
ports:
- containerPort: 80
resources:
limits:
cpu: 500m
requests:
cpu: 200m
---
apiVersion: v1
kind: Service
metadata:
name: php-apache
labels:
run: php-apache
spec:
ports:
- port: 80
selector:
run: php-apache
Write hpa.yaml to scale on CPU utilization and memory usage:
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: php-apache
spec:
scaleTargetRef:
    apiVersion: apps/v1
kind: Deployment
name: php-apache
minReplicas: 1
maxReplicas: 10
metrics:
- type: Resource
resource:
name: cpu
      target:
type: Utilization
averageUtilization: 50
- type: Resource
resource:
name: memory
target:
type: AverageValue
averageValue: 500Mi
[root@k8s-master-1 hpa]# kubectl apply -f hpa.yaml
2. Create the HPA (HorizontalPodAutoscaler)
A CPU-only HPA can also be created imperatively, instead of applying hpa.yaml:
[root@k8s-master-1 hpa]#kubectl autoscale deployment php-apache --cpu-percent=50 --min=1 --max=10
[root@k8s-master-1 hpa]#kubectl get hpa
3. Generate Load
[root@k8s-master-1 hpa]#kubectl run -i --tty load-generator --rm --image=busybox:1.28 --restart=Never -- /bin/sh -c "while sleep 0.01; do wget -q -O- http://php-apache; done"
You should see additional pods being created.
4. Watch
[root@k8s-master-1 hpa]#kubectl get hpa php-apache --watch
XIII. Use Probes to Verify the Application Pod Runs Correctly
Write the manifest (the /health, /ready, and /healthz endpoints are assumed to be served by the app on port 8080):
apiVersion: v1
kind: Pod
metadata:
name: my-go-app-pod
spec:
containers:
- name: my-go-app-container
image: my-go-app:latest
    ports:
    - containerPort: 8080
livenessProbe:
httpGet:
path: /health
port: 8080
initialDelaySeconds: 10
periodSeconds: 5
readinessProbe:
httpGet:
path: /ready
port: 8080
initialDelaySeconds: 5
periodSeconds: 5
startupProbe:
httpGet:
path: /healthz
        port: 8080
failureThreshold: 30
periodSeconds: 10
Run the pod:
[root@k8s-master-1 probes]# kubectl apply -f probes.yaml
pod/my-go-app-pod created
Reference:
https://kubernetes.io/zh-cn/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#http-probes
XIV. Deploy Ingress for URL-Based Load Balancing
1. scp the Image Archives to All Node Servers
[root@k8s-master-1 ingress]# scp ingress-nginx-controllerv1.1.0.tar.gz node-1:/root
ingress-nginx-controllerv1.1.0.tar.gz 100% 276MB 42.7MB/s 00:06
[root@k8s-master-1 ingress]# scp ingress-nginx-controllerv1.1.0.tar.gz node-2:/root
ingress-nginx-controllerv1.1.0.tar.gz 100% 276MB 45.7MB/s 00:06
[root@k8s-master-1 ingress]# scp kube-webhook-certgen-v1.1.0.tar.gz node-2:/root
kube-webhook-certgen-v1.1.0.tar.gz 100% 47MB 40.5MB/s 00:01
[root@k8s-master-1 ingress]# scp kube-webhook-certgen-v1.1.0.tar.gz node-1:/root
kube-webhook-certgen-v1.1.0.tar.gz 100% 47MB 47.1MB/s 00:00
2. Load the Images on Every Node Server (node-1 and node-2)
[root@k8s-node-1 ingress]# docker load -i ingress-nginx-controllerv1.1.0.tar.gz
[root@k8s-node-1 ingress]# docker load -i kube-webhook-certgen-v1.1.0.tar.gz
[root@k8s-node-1 ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
registry.cn-hangzhou.aliyuncs.com/google_containers/nginx-ingress-controller v1.1.0 ae1a7201ec95 16 months ago 285MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-webhook-certgen v1.1.1 c41e9fcadf5a 17 months ago 47.7MB
[root@k8s-node-2 ingress]# docker load -i ingress-nginx-controllerv1.1.0.tar.gz
[root@k8s-node-2 ingress]# docker load -i kube-webhook-certgen-v1.1.0.tar.gz
[root@k8s-node-2 ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
registry.cn-hangzhou.aliyuncs.com/google_containers/nginx-ingress-controller v1.1.0 ae1a7201ec95 16 months ago 285MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-webhook-certgen v1.1.1 c41e9fcadf5a 17 months ago 47.7MB
3. Start the Ingress Controller from ingress-controller-deploy.yaml
[root@k8s-master-1 ingress]# ls
ingress-controller-deploy.yaml kube-webhook-certgen-v1.1.0.tar.gz sc-ingress.yaml
ingress-nginx-controllerv1.1.0.tar.gz nginx-deployment-nginx-svc-2.yaml sc-nginx-svc-1.yaml
Role-based access control (RBAC) is a method of regulating access to computer or network resources based on the roles of individual users within your organization.
Apply ingress-controller-deploy.yaml to start the ingress controller:
[root@k8s-master-1 ingress]# kubectl apply -f ingress-controller-deploy.yaml
namespace/ingress-nginx created
serviceaccount/ingress-nginx created
configmap/ingress-nginx-controller created
clusterrole.rbac.authorization.k8s.io/ingress-nginx created
clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx created
role.rbac.authorization.k8s.io/ingress-nginx created
rolebinding.rbac.authorization.k8s.io/ingress-nginx created
service/ingress-nginx-controller-admission created
service/ingress-nginx-controller created
deployment.apps/ingress-nginx-controller created
ingressclass.networking.k8s.io/nginx created
validatingwebhookconfiguration.admissionregistration.k8s.io/ingress-nginx-admission created
serviceaccount/ingress-nginx-admission created
clusterrole.rbac.authorization.k8s.io/ingress-nginx-admission created
clusterrolebinding.rbac.authorization.k8s.io/ingress-nginx-admission created
role.rbac.authorization.k8s.io/ingress-nginx-admission created
rolebinding.rbac.authorization.k8s.io/ingress-nginx-admission created
job.batch/ingress-nginx-admission-create created
job.batch/ingress-nginx-admission-patch created
Check the ingress controller's namespace:
[root@k8s-master-1 ingress]# kubectl get ns
NAME STATUS AGE
default Active 11d
ingress-nginx Active 52s
kube-node-lease Active 11d
kube-public Active 11d
kube-system Active 11d
sanchuang Active 10d
Check the ingress controller's services:
[root@k8s-master-1 ingress]# kubectl get svc -n ingress-nginx
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
ingress-nginx-controller NodePort 10.99.160.10 <none> 80:30092/TCP,443:30263/TCP 91s
ingress-nginx-controller-admission ClusterIP 10.99.138.23 <none> 443/TCP 91s
Check the ingress controller's pods:
[root@k8s-master-1 ingress]# kubectl get pod -n ingress-nginx
NAME READY STATUS RESTARTS AGE
ingress-nginx-admission-create-k69t2 0/1 Completed 0 119s
ingress-nginx-admission-patch-zsrk8 0/1 Completed 1 119s
ingress-nginx-controller-6c8ffbbfcf-bt94p 1/1 Running 0 119s
ingress-nginx-controller-6c8ffbbfcf-d49kx 1/1 Running 0 119s
4. Prepare and Apply the YAML Files
[root@k8s-master-1 ingress]# ll
total 330196
-rw-r--r-- 1 root root 288980480 Sep 24 16:50 ingress-nginx-controllerv1.1.0.tar.gz
-rw-r--r-- 1 root root 532 Sep 24 20:32 ingress-url.yaml
-rw-r--r-- 1 root root 584 Sep 24 20:10 nginx-1.yaml
-rw-r--r-- 1 root root 598 Sep 24 20:34 nginx-2.yaml
[root@k8s-master-1 ingress]# cat ingress-controller-deploy.yaml
apiVersion: v1
kind: Namespace
metadata:
name: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
data:
allow-snippet-annotations: 'true'
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
rules:
- apiGroups:
- ''
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
- namespaces
verbs:
- list
- watch
- apiGroups:
- ''
resources:
- nodes
verbs:
- get
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
rules:
- apiGroups:
- ''
resources:
- namespaces
verbs:
- get
- apiGroups:
- ''
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- configmaps
resourceNames:
- ingress-controller-leader
verbs:
- get
- update
- apiGroups:
- ''
resources:
- configmaps
verbs:
- create
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-service-webhook.yaml
apiVersion: v1
kind: Service
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller-admission
namespace: ingress-nginx
spec:
type: ClusterIP
ports:
- name: https-webhook
port: 443
targetPort: webhook
appProtocol: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
annotations:
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
type: NodePort
ipFamilyPolicy: SingleStack
ipFamilies:
- IPv4
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
appProtocol: http
- name: https
port: 443
protocol: TCP
targetPort: https
appProtocol: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
replicas: 2
selector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
revisionHistoryLimit: 10
minReadySeconds: 0
template:
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
spec:
hostNetwork: true
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
topologyKey: kubernetes.io/hostname
dnsPolicy: ClusterFirstWithHostNet
containers:
- name: controller
image: registry.cn-hangzhou.aliyuncs.com/google_containers/nginx-ingress-controller:v1.1.0
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
args:
- /nginx-ingress-controller
- --election-id=ingress-controller-leader
- --controller-class=k8s.io/ingress-nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
- --validating-webhook=:8443
- --validating-webhook-certificate=/usr/local/certificates/cert
- --validating-webhook-key=/usr/local/certificates/key
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
runAsUser: 101
allowPrivilegeEscalation: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
ports:
- name: http
containerPort: 80
protocol: TCP
- name: https
containerPort: 443
protocol: TCP
- name: webhook
containerPort: 8443
protocol: TCP
volumeMounts:
- name: webhook-cert
mountPath: /usr/local/certificates/
readOnly: true
resources:
requests:
cpu: 100m
memory: 90Mi
nodeSelector:
kubernetes.io/os: linux
serviceAccountName: ingress-nginx
terminationGracePeriodSeconds: 300
volumes:
- name: webhook-cert
secret:
secretName: ingress-nginx-admission
---
# Source: ingress-nginx/templates/controller-ingressclass.yaml
# We don't support namespaced ingressClass yet
# So a ClusterRole and a ClusterRoleBinding is required
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: controller
name: nginx
namespace: ingress-nginx
spec:
controller: k8s.io/ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
# before changing this value, check the required kubernetes version
# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
name: ingress-nginx-admission
webhooks:
- name: validate.nginx.ingress.kubernetes.io
matchPolicy: Equivalent
rules:
- apiGroups:
- networking.k8s.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- ingresses
failurePolicy: Fail
sideEffects: None
admissionReviewVersions:
- v1
clientConfig:
service:
namespace: ingress-nginx
name: ingress-nginx-controller-admission
path: /networking/v1/ingresses
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: ingress-nginx-admission
namespace: ingress-nginx
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: ingress-nginx-admission
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- update
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ingress-nginx-admission
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: ingress-nginx-admission
namespace: ingress-nginx
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
rules:
- apiGroups:
- ''
resources:
- secrets
verbs:
- get
- create
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: ingress-nginx-admission
namespace: ingress-nginx
annotations:
helm.sh/hook: pre-install,pre-upgrade,post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: ingress-nginx-admission-create
namespace: ingress-nginx
annotations:
helm.sh/hook: pre-install,pre-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
template:
metadata:
name: ingress-nginx-admission-create
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: create
image: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-webhook-certgen:v1.1.1
imagePullPolicy: IfNotPresent
args:
- create
- --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
- --namespace=$(POD_NAMESPACE)
- --secret-name=ingress-nginx-admission
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
securityContext:
allowPrivilegeEscalation: false
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 2000
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: ingress-nginx-admission-patch
namespace: ingress-nginx
annotations:
helm.sh/hook: post-install,post-upgrade
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
template:
metadata:
name: ingress-nginx-admission-patch
labels:
helm.sh/chart: ingress-nginx-4.0.10
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/version: 1.1.0
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: patch
image: registry.cn-hangzhou.aliyuncs.com/google_containers/kube-webhook-certgen:v1.1.1
imagePullPolicy: IfNotPresent
args:
- patch
- --webhook-name=ingress-nginx-admission
- --namespace=$(POD_NAMESPACE)
- --patch-mutating=false
- --secret-name=ingress-nginx-admission
- --patch-failure-policy=Fail
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
securityContext:
allowPrivilegeEscalation: false
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
nodeSelector:
kubernetes.io/os: linux
securityContext:
runAsNonRoot: true
runAsUser: 2000
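After saving the rendered manifest, apply it and wait for the controller pods to become Ready. A minimal sketch; the file name deploy.yaml is an assumption, use whatever name you saved the manifest under:
kubectl apply -f deploy.yaml   # assumed file name
kubectl -n ingress-nginx wait --for=condition=Ready pod -l app.kubernetes.io/component=controller --timeout=120s
kubectl get pod -n ingress-nginx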
[root@k8s-master-1 ingress]# cat ingress-url.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: simple-fanout-example
annotations:
kubernetes.io/ingress.class: nginx
spec:
ingressClassName: nginx
rules:
- host: www.wen.com
http:
paths:
- path: /foo
pathType: Prefix
backend:
service:
name: nginx-svc-1
port:
number: 80
- path: /bar
pathType: Prefix
backend:
service:
name: nginx-svc-2
port:
number: 80
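Note that this Ingress forwards /foo and /bar to the backends with the path unchanged, so the backend nginx must actually serve /foo and /bar (the 404 troubleshooting below runs into exactly this). If you instead want /foo to map to a backend's root path, ingress-nginx supports the rewrite-target annotation; a sketch, to be adapted as needed:
kubectl annotate ingress simple-fanout-example nginx.ingress.kubernetes.io/rewrite-target=/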
[root@k8s-master-1 ingress]# cat nginx-1.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deploy-1
labels:
app: nginx-1
spec:
selector:
matchLabels:
app: nginx-1
template:
metadata:
labels:
app: nginx-1
spec:
containers:
- name: nginx-1
image: nginx
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: nginx-svc-1
labels:
app: nginx-svc-1
spec:
selector:
app: nginx-1
ports:
- name: name-of-service-port
protocol: TCP
port: 80
targetPort: 80
[root@k8s-master-1 ingress]# cat nginx-2.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deploy-2
labels:
app: nginx-2
spec:
replicas: 2
selector:
matchLabels:
app: nginx-2
template:
metadata:
labels:
app: nginx-2
spec:
containers:
- name: nginx-2
image: nginx
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: nginx-svc-2
labels:
app: nginx-svc-2
spec:
selector:
app: nginx-2
ports:
- name: name-of-service-port
protocol: TCP
port: 80
targetPort: 80
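Apply the three manifests above and confirm that the Ingress object exists and has picked up an address; a quick check using the file names from this directory:
kubectl apply -f nginx-1.yaml -f nginx-2.yaml -f ingress-url.yaml
kubectl get ingress simple-fanout-example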
[root@k8s-master-1 ingress]# kubectl get pod -n ingress-nginx
NAME READY STATUS RESTARTS AGE
ingress-nginx-admission-create-r94bv 0/1 Completed 0 110m
ingress-nginx-admission-patch-85kxw 0/1 Completed 0 105m
ingress-nginx-controller-68d5d5f554-2sf7b 1/1 Running 0 110m
ingress-nginx-controller-68d5d5f554-zphqq 1/1 Running 0 110m
Sometimes the ingress-controller fails to start; deleting the failed pod and letting the Deployment regenerate a new one is usually enough.
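A minimal sketch of that recovery; the pod name below is a placeholder, take the real one from kubectl get pod:
kubectl -n ingress-nginx get pod
kubectl -n ingress-nginx delete pod <failed-controller-pod>   # placeholder name
# the controller Deployment's ReplicaSet recreates the pod automatically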
5. Testing
Run the tests from the nfs server; first add name-resolution records for www.wen.com to its /etc/hosts file.
[root@nfs ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.186.150 www.wen.com
192.168.186.151 www.wen.com
The test cannot find the page. Is the problem in the ingress controller, or in our backend pods?
[root@nfs ~]# curl www.wen.com/foo
<html>
<head><title>404 Not Found</title></head>
<body>
<center><h1>404 Not Found</h1></center>
<hr><center>nginx/1.21.5</center>
</body>
</html>
[root@nfs ~]# curl www.wen.com/bar
<html>
<head><title>404 Not Found</title></head>
<body>
<center><h1>404 Not Found</h1></center>
<hr><center>nginx/1.21.5</center>
</body>
</html>
Check the ingress-nginx-controller pod information:
[root@k8s-master-1 ingress]# kubectl get pod -n ingress-nginx
NAME READY STATUS RESTARTS AGE
ingress-nginx-admission-create-lc8gj 0/1 Completed 0 17h
ingress-nginx-admission-patch-fs7ft 0/1 Completed 1 17h
ingress-nginx-controller-6c8ffbbfcf-4j872 1/1 Running 1 17h
ingress-nginx-controller-6c8ffbbfcf-qr62z 1/1 Running 1 17h
Troubleshooting approach:
1. Go inside the ingress controller and check whether the URL-based load-balancing configuration has been loaded
[root@master lb-url]# kubectl exec -it ingress-nginx-controller-6c8ffbbfcf-4j872 -n ingress-nginx -- bash
bash-5.1$ cat nginx.conf|grep www.wen.com
## start server www.wen.com
server_name www.wen.com ;
## end server www.wen.com
bash-5.1$
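The same check also works without an interactive shell; the controller's configuration lives at /etc/nginx/nginx.conf:
kubectl -n ingress-nginx exec deploy/ingress-nginx-controller -- grep -A1 'www.wen.com' /etc/nginx/nginx.conf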
2. Check the pods' logs and detailed information
[root@k8s-master-1 ingress]# kubectl logs ingress-nginx-controller-6c8ffbbfcf-4j872 -n ingress-nginx
[root@k8s-master-1 ingress]# kubectl logs ingress-nginx-controller-6c8ffbbfcf-qr62z -n ingress-nginx -f
192.168.203.131 - - [08/Sep/2023:02:38:57 +0000] "GET /bar HTTP/1.1" 404 153 "-" "curl/7.29.0" 78 0.002 [default-sc-nginx-svc-4-80] [] 10.244.84.154:80 153 0.002 404 743b6f6eca4bb7ace60641dfd1d02b33
192.168.203.131 - - [08/Sep/2023:02:38:58 +0000] "GET /bar HTTP/1.1" 404 153 "-" "curl/7.29.0" 78 0.000 [default-sc-nginx-svc-4-80] [] 10.244.247.13:80 153 0.001 404 c7590416112b649fc2a5456152581fe0
192.168.203.131 - - [08/Sep/2023:02:38:59 +0000] "GET /bar HTTP/1.1" 404 153 "-" "curl/7.29.0" 78 0.002 [default-sc-nginx-svc-4-80] [] 10.244.84.154:80 153 0.002 404 edad15d8f1bebcf006535d61b29f8a6f
192.168.203.131 - - [08/Sep/2023:02:39:39 +0000] "GET /bar HTTP/1.1" 404 153 "-" "curl/7.29.0" 78 0.003 [default-sc-nginx-svc-4-80] [] 10.244.247.13:80 153 0.004 404 001592532f6226dd0acac818f90d1050
Check the logs of the backend pod that ultimately serves the web content
[root@k8s-master-1 ingress]# kubectl logs sc-nginx-deploy-4-766c99dd77-7twnm -f
2023/09/08 02:38:57 [error] 32#32: *1 open() "/usr/share/nginx/html/bar" failed (2: No such file or directory), client: 10.244.247.0, server: localhost, request: "GET /bar HTTP/1.1", host: "www.wen.com"
10.244.247.0 - - [08/Sep/2023:02:38:57 +0000] "GET /bar HTTP/1.1" 404 153 "-" "curl/7.29.0" "192.168.203.131"
The directory /usr/share/nginx/html/bar does not exist, which is what causes the 404 error.
Go into one of the pods behind the service and create the bar and foo directories along with index.html pages (only this one replica is patched here; a loop for all replicas is sketched after the transcript):
[root@k8s-master-1 ingress]# kubectl exec -it sc-nginx-deploy-4-766c99dd77-7twnm -- bash
root@sc-nginx-deploy-4-766c99dd77-7twnm:/# cd /usr/share/nginx/html/
root@sc-nginx-deploy-4-766c99dd77-7twnm:/usr/share/nginx/html# ls
50x.html index.html
root@sc-nginx-deploy-4-766c99dd77-7twnm:/usr/share/nginx/html# mkdir bar
root@sc-nginx-deploy-4-766c99dd77-7twnm:/usr/share/nginx/html# echo "hello,bar" >bar/index.html
root@sc-nginx-deploy-4-766c99dd77-7twnm:/usr/share/nginx/html# ls
50x.html bar foo index.html
root@sc-nginx-deploy-4-766c99dd77-7twnm:/usr/share/nginx/html# cd bar
root@sc-nginx-deploy-4-766c99dd77-7twnm:/usr/share/nginx/html/bar# cat index.html
hello,bar
root@sc-nginx-deploy-4-766c99dd77-7twnm:/usr/share/nginx/html/bar# ls
index.html
root@sc-nginx-deploy-4-766c99dd77-7twnm:/usr/share/nginx/html/bar#
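Creating the files in a single pod only fixes that one replica; the other replicas behind the Service still return 404. A sketch that repeats the fix in every pod of the deployment; the label selector app=sc-nginx-4 is an assumption, check the real labels with kubectl get pod --show-labels:
for p in $(kubectl get pod -l app=sc-nginx-4 -o name); do   # label is an assumption
  kubectl exec "$p" -- sh -c 'mkdir -p /usr/share/nginx/html/foo /usr/share/nginx/html/bar &&
    echo "hello,foo" > /usr/share/nginx/html/foo/index.html &&
    echo "hello,bar" > /usr/share/nginx/html/bar/index.html'
done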
Test again from the nfs server, and test several times: the ipvs scheduling algorithm behind the Service is round-robin, so requests alternate between the pods, and only the patched pod returns the new files.
[root@nfs ~]# curl www.wen.com/foo/index.html
<html>
<head><title>404 Not Found</title></head>
<body>
<center><h1>404 Not Found</h1></center>
<hr><center>nginx/1.21.5</center>
</body>
</html>
[root@nfs ~]# curl www.wen.com/bar/index.html
hello,bar
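Because of the round-robin, a small loop makes the behaviour easier to see; with only one of the two replicas patched, roughly half of the responses will still be 404 pages:
for i in $(seq 1 6); do curl -s www.wen.com/bar/index.html; done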
6. Stress testing with ab
[root@k8s-master-1 ingress]# ab -n 100000 -c 100 www.wen.com/bar/index.html
This is ApacheBench, Version 2.3 <$Revision: 1430300 $>
Copyright 1996 Adam Twiss, Zeus Technology Ltd, http://www.zeustech.net/
Licensed to The Apache Software Foundation, http://www.apache.org/
Benchmarking www.wen.com (be patient)
Completed 10000 requests
Completed 20000 requests
Completed 30000 requests
Completed 40000 requests
Completed 50000 requests
Completed 60000 requests
Completed 70000 requests
Completed 80000 requests
Completed 90000 requests
Completed 100000 requests
Finished 100000 requests
Server Software:
Server Hostname: www.wen.com
Server Port: 80
Document Path: /bar/index.html
Document Length: 615 bytes
Concurrency Level: 100
Time taken for tests: 27.145 seconds
Complete requests: 100000
Failed requests: 0
Write errors: 0
Total transferred: 82600000 bytes
HTML transferred: 61500000 bytes
Requests per second: 3683.96 [#/sec] (mean)
Time per request: 27.145 [ms] (mean)
Time per request: 0.271 [ms] (mean, across all concurrent requests)
Transfer rate: 2971.64 [Kbytes/sec] received
Connection Times (ms)
min mean[+/-sd] median max
Connect: 0 1 1.3 0 42
Processing: 0 26 23.7 20 209
Waiting: 0 26 23.7 19 208
Total: 1 27 23.8 21 209
Percentage of the requests served within a certain time (ms)
50% 21
66% 39
75% 48
80% 51
90% 58
95% 65
98% 76
99% 87
100% 209 (longest request)
Analysis of the results
Overall: the server held up well under high concurrency, completing all 100,000 requests with no failed requests.
Throughput: an average of 3,683.96 requests per second, which indicates the setup can handle a relatively high load.
Latency: half of the requests (50%) completed within 21 ms, but the slowest request took 209 ms, which suggests that under heavy load a small number of requests can be noticeably slower.
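For comparison, ab can reuse connections with HTTP keep-alive (the standard -k flag), which usually raises the requests-per-second figure:
ab -n 100000 -c 100 -k www.wen.com/bar/index.html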
XV. Build the CI/CD environment: install gitlab, harbor, and Jenkins for code release and image building
1. gitlab
1.1 Install gitlab
1. Install and configure the required dependencies
On CentOS 7, the official instructions also open HTTP, HTTPS, and SSH access in the system firewall; that step is optional and can be skipped if you only plan to access JiHu GitLab from the local network. Install the dependencies:
[root@ansible ~]# sudo yum install -y curl policycoreutils-python openssh-server perl
2. Download and install JiHu GitLab
Run the following command to configure the JiHu GitLab package repository:
[root@ansible ~]# curl -fsSL https://get.gitlab.cn | /bin/bash
==> Detected OS centos
==> Add yum repo file to /etc/yum.repos.d/gitlab-jh.repo
[gitlab-jh]
name=JiHu GitLab
baseurl=https://packages.gitlab.cn/repository/el/$releasever/
gpgcheck=1
gpgkey=https://packages.gitlab.cn/repository/raw/gpg/public.gpg.key
priority=1
enabled=1
==> Generate yum cache for gitlab-jh
==> Successfully added gitlab-jh repo. To install JiHu GitLab, run "sudo yum/dnf install gitlab-jh".
3. Install the package
[root@ansible ~]# yum install gitlab-jh -y
4. Modify the gitlab configuration
[root@nfs-server ~]# vim /etc/gitlab/gitlab.rb
external_url 'http://gitlab.sc.com'   # change the domain name
5. Start JiHu GitLab
[root@nfs-server ~]#sudo gitlab-ctl reconfigure   # reconfigure and start JiHu GitLab
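Check that all of the bundled components came up; gitlab-ctl status ships with the omnibus package:
[root@nfs-server ~]# gitlab-ctl status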
1.2 Log in to gitlab
Access it over HTTP:
http://192.168.186.153/users/sign_in
The username is root; the password is the value after "Password:" below.
[root@ansible yum.repos.d]# cat /etc/gitlab/initial_root_password
# WARNING: This value is valid only in the following conditions
# 1. If provided manually (either via `GITLAB_ROOT_PASSWORD` environment variable or via `gitlab_rails['initial_root_password']` setting in `gitlab.rb`, it was provided before database was seeded for the first time (usually, the first reconfigure run).
# 2. Password hasn't been changed manually, either via UI or via command line.
#
# If the password shown here doesn't work, you must reset the admin password following https://docs.gitlab.com/ee/security/reset_user_password.html#reset-your-root-password.
Password: CNnq8mn8akpQXF22fJrxf2skC2lFODForIgegeTU69U=
# NOTE: This file will be automatically deleted in the first reconfigure run after 24 hours.
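If this file is already gone (it is deleted automatically in the first reconfigure run after 24 hours), the root password can be reset from the command line with the documented rake task:
[root@ansible ~]# gitlab-rake "gitlab:password:reset[root]"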
1.3 Using gitlab
Refer to the following blog post; usage is basically the same as GitHub:
https://blog.csdn.net/cdns999/article/details/135354894?fromshare=blogdetail&sharetype=blogdetail&sharerId=135354894&sharerefer=PC&sharesource=qq_63900813&sharefrom=from_link
2. jenkins
Deploy it on Kubernetes.
Refer to the following:
https://blog.csdn.net/cdns999/article/details/135354894?fromshare=blogdetail&sharetype=blogdetail&sharerId=135354894&sharerefer=PC&sharesource=qq_63900813&sharefrom=from_link
3. harbor
1. Prepare the environment
See section III (Configure the docker environment).
2. Download the harbor offline installer and install it
[root@docker Dockerfile]# mkdir harbor
[root@docker Dockerfile]# cd harbor/
[root@docker harbor]# ls
harbor-offline-installer-v2.1.0.tgz
#unpack the tarball
[root@docker harbor]# tar xf harbor-offline-installer-v2.1.0.tgz
[root@docker harbor]# cp harbor.yml.tmpl harbor.yml
Modify the configuration file
[root@docker harbor]# vim harbor.yml
hostname: 192.168.186.153   # change this
# http related config
http:
# port for http, default is 80. If https enabled, this port will redirect to https port
port: 8090   # change this
#If you have no certificate, comment out the https-related settings in the config file
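For reference, commenting out the https block in harbor.yml looks roughly like this (the paths shown are the template's own placeholders):
# https related config
#https:
#  port: 443
#  certificate: /your/certificate/path
#  private_key: /your/private/key/path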
Run the installer
[root@docker harbor]# ./install.sh
3. Access the web UI
Open the site from a Windows machine and configure harbor.
The default login username and password:
admin / Harbor12345
Create a project named sanchuang in harbor.
Then create your own username and password.
4. Push and pull images from a client host
To use this registry on another docker host, add it to daemon.json as an insecure registry:
[root@docker ~]# cat /etc/docker/daemon.json
{
"registry-mirrors": ["https://registry.docker-cn.com"],
"insecure-registries" : ["192.168.186.153:8090"]
}
#restart docker so the new daemon.json takes effect
[root@docker ~]# systemctl daemon-reload
[root@docker ~]# systemctl restart docker
After that, everything works like normal docker usage.
Re-tag the image:
[root@docker ~]# docker tag nginx:latest 192.168.186.153:8090/sanchuang/nginx:latest
Log in to the private registry:
[root@docker harbor]# docker login 192.168.186.153:8090
Username: lay
Password:
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store
Login Succeeded
Push the image to the private registry:
[root@docker harbor]# docker push 192.168.186.153:8090/sanchuang/nginx:latest
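Finally, verify that the image can be pulled back from the registry:
[root@docker ~]# docker pull 192.168.186.153:8090/sanchuang/nginx:latest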