1. Compile and install nginx on a cloud host (adding the nginx-module-vts module) and collect its metrics with Prometheus via nginx-vts-exporter; then build a custom nginx container image, run nginx on Kubernetes, and collect its metrics with Prometheus
Compile and install nginx on a cloud host, then monitor it with Prometheus
https://github.com/vozlt/nginx-module-vts
root@prometheus-node2:~# cd /usr/local/src/
root@prometheus-node2:/usr/local/src# git clone https://github.com/vozlt/nginx-module-vts.git
root@prometheus-node2:/usr/local/src# wget https://nginx.org/download/nginx-1.24.0.tar.gz
root@prometheus-node2:/usr/local/src# apt install iproute2 ntpdate tcpdump telnet traceroute nfs-kernel-server nfs-common lrzsz tree openssl libssl-dev libpcre3 libpcre3-dev zlib1g-dev gcc openssh-server iotop unzip zip make
root@prometheus-node2:/usr/local/src# tar xvf nginx-1.24.0.tar.gz && cd nginx-1.24.0/
root@prometheus-node2:/usr/local/src/nginx-1.24.0# ./configure --prefix=/apps/nginx \
--with-http_ssl_module \
--with-http_v2_module \
--with-http_realip_module \
--with-http_stub_status_module \
--with-http_gzip_static_module \
--with-pcre \
--with-file-aio \
--with-stream \
--with-stream_ssl_module \
--with-stream_realip_module \
--add-module=/usr/local/src/nginx-module-vts/
root@prometheus-node2:/usr/local/src/nginx-1.24.0# make && make install
vim /apps/nginx/conf/nginx.conf
#gzip on;
vhost_traffic_status_zone; # enable the vts status zone (shared memory for the status page)
Configuration inside the server block:
server {
    listen 80;
    server_name localhost;
    #charset koi8-r;
    #access_log logs/host.access.log main;
    location / {
        root html;
        index index.html index.htm;
        proxy_pass http://192.168.220.200:9090;
    }
    location /status {
        vhost_traffic_status_display;
        vhost_traffic_status_display_format html;
    }
}
root@prometheus-node2:/usr/local/src/nginx-1.24.0# /apps/nginx/sbin/nginx -t
root@prometheus-node2:/usr/local/src/nginx-1.24.0# /apps/nginx/sbin/nginx
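Nginx should now serve the vts status page at /status; a quick sanity check against the JSON endpoint that the exporter will scrape:
curl -s http://192.168.220.200/status/format/json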
![](https://i-blog.csdnimg.cn/blog_migrate/13831f3fbdd42e2d1123f25829b503f1.png)
root@prometheus-node2:/usr/local/src# wget https://github.com/hnlq715/nginx-vts-exporter/releases/download/v0.10.3/nginx-vts-exporter-0.10.3.linux-amd64.tar.gz
root@prometheus-node2:/usr/local/src# tar xvf nginx-vts-exporter-0.10.3.linux-amd64.tar.gz
root@prometheus-node2:/usr/local/src# cp nginx-vts-exporter-0.10.3.linux-amd64/nginx-vts-exporter /usr/local/bin/
root@prometheus-node2:/usr/local/src# cat /etc/systemd/system/nginx-vts-exporter.service
[Unit]
Description=nginx-vts-exporter
After=network.target
[Service]
ExecStart=/usr/local/bin/nginx-vts-exporter -nginx.scrape_uri http://192.168.220.200/status/format/json
[Install]
WantedBy=multi-user.target
systemctl start nginx-vts-exporter.service && systemctl enable nginx-vts-exporter.service
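The exporter listens on port 9913 by default; confirm it is exposing nginx metrics (metric names may vary slightly by exporter version):
curl -s http://192.168.220.102:9913/metrics | grep nginx_server_requests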
![](https://i-blog.csdnimg.cn/blog_migrate/533b74d12cec30e4b1b962731feeef09.png)
Add a scrape job to /apps/prometheus/prometheus.yml on the Prometheus server:
- job_name: 'nginx-nodes'
  static_configs:
  - targets: ['192.168.220.102:9913']
systemctl restart prometheus.service
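Once the target reports UP, per-status-code request rates can be graphed with a query along these lines (label values as exposed by nginx-vts-exporter; adjust to what your version reports):
rate(nginx_server_requests{code=~"2xx|5xx"}[5m])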
![](https://i-blog.csdnimg.cn/blog_migrate/69342668c2f20c06d112d4508448cdeb.png)
![](https://i-blog.csdnimg.cn/blog_migrate/a90c7ebe832ef3965a2230e4555e9eed.png)
Build a custom nginx container image, run nginx on Kubernetes, and monitor it with Prometheus
Write a Dockerfile in the build directory:
FROM ubuntu:22.04
LABEL maintainer="jack 2973707860@qq.com"
ADD nginx-1.22.1.tar.gz /usr/local/src/
ADD nginx-vts-exporter-0.10.3.linux-amd64/nginx-vts-exporter /usr/local/bin/
ADD nginx-module-vts/ /usr/local/src/nginx-module-vts
RUN apt update && apt install -y iproute2 gcc openssh-server lrzsz tree openssl libssl-dev libpcre3 libpcre3-dev zlib1g-dev ntpdate tcpdump telnet traceroute iotop unzip zip make
RUN groupadd -r -g 2088 user1 && useradd -r -m -s /sbin/nologin -u 2088 -g 2088 user1
WORKDIR /usr/local/src/
RUN cd nginx-1.22.1/ && ./configure --prefix=/apps/nginx \
--with-http_ssl_module \
--with-http_v2_module \
--with-http_realip_module \
--with-http_stub_status_module \
--with-http_gzip_static_module \
--with-pcre \
--with-file-aio \
--with-stream \
--with-stream_ssl_module \
--with-stream_realip_module \
--add-module=/usr/local/src/nginx-module-vts/ && make && make install
WORKDIR /root
ADD docker-entrypoint.sh /usr/local/bin/
RUN ln -s /usr/local/bin/docker-entrypoint.sh /entrypoint.sh
ADD nginx.conf /apps/nginx/conf/nginx.conf
EXPOSE 80 443
ENTRYPOINT ["docker-entrypoint.sh"]
Write the startup script for nginx and nginx-vts-exporter; when starting nginx-vts-exporter, point it at nginx's status URL:
vim docker-entrypoint.sh
#!/bin/bash
# start nginx (daemonizes by default)
/apps/nginx/sbin/nginx
# run the exporter in the foreground so it keeps the container alive
/usr/local/bin/nginx-vts-exporter -nginx.scrape_uri http://127.0.0.1/status/format/json
With all build files staged in this directory, start the image build: write a build-and-push script and upload the image to the private registry.
root@k8s-master1:~/1.prometheus-case-files/app-monitor-case/4.nginx-vts-exporter/1.nginx-image# bash build-command.sh
root@k8s-master1:~/1.prometheus-case-files/app-monitor-case/4.nginx-vts-exporter/2.nginx-yaml# pwd
/root/1.prometheus-case-files/app-monitor-case/4.nginx-vts-exporter/2.nginx-yaml
root@k8s-master1:~/1.prometheus-case-files/app-monitor-case/4.nginx-vts-exporter/2.nginx-yaml# kubectl apply -f 1.nginx-deployment.yaml -f 2.nginx-svc.yaml
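The two manifests aren't reproduced in the original; a minimal sketch of what they plausibly contain, assuming the image pushed by build-command.sh (image path, names, and namespace are illustrative) and a node-port range extended to allow 39913, which the Prometheus target below scrapes:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: harbor.local/baseimages/nginx-vts:v1  # illustrative private-registry path
        ports:
        - containerPort: 80    # nginx
        - containerPort: 9913  # nginx-vts-exporter
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-svc
spec:
  type: NodePort
  selector:
    app: nginx
  ports:
  - name: http
    port: 80
    targetPort: 80
    nodePort: 30080   # illustrative
  - name: metrics
    port: 9913
    targetPort: 9913
    nodePort: 39913   # matches the scrape target below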
vim prometheus.yml
- job_name: 'kubernetes-nginx-monitor'
  static_configs:
  - targets: ['192.168.220.111:39913']
systemctl restart prometheus.service
2. Prometheus monitoring case: Kubernetes Ingress-Controller (multiple domains, multiple locations)
3. Deploy Kafka with docker-compose and collect Kafka metrics with kafka-exporter
docker-compose.yml:
version: '3'
services:
  zookeeper-server:
    container_name: zookeeper-container
    image: registry.cn-hangzhou.aliyuncs.com/zhangshijie/zookeeper:v3.7.0
    restart: always
    ports:
      - "2181:2181"
    volumes:
      - /etc/localtime:/etc/localtime
      - zookeeper_vol:/data
      - zookeeper_vol:/datalog
      - zookeeper_vol:/logs
  kafka-server:
    container_name: kafka-container
    image: registry.cn-hangzhou.aliyuncs.com/zhangshijie/kafka:2.13-2.8.1
    ports:
      - "9092:9092"
    environment:
      #KAFKA_ADVERTISED_HOST_NAME: "172.31.4.3"
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://172.31.4.3:9092 # host IP
      KAFKA_ADVERTISED_HOST_NAME: 0.0.0.0
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper-server:2181"
      KAFKA_LOG_DIRS: "/kafka/logs"
    volumes:
      - /etc/localtime:/etc/localtime
      - kafka_vol:/kafka
    restart: always
    links:
      - zookeeper-server
    depends_on:
      - zookeeper-server
  kafka_manager:
    image: registry.cn-hangzhou.aliyuncs.com/zhangshijie/kafka-manager
    container_name: kafka-manager-container
    ports:
      - "9000:9000"
    environment:
      ZK_HOSTS: "zookeeper-server:2181"
    restart: always
    volumes:
      - /etc/localtime:/etc/localtime
    links:
      - zookeeper-server
    depends_on:
      - zookeeper-server
      - kafka-server
  kafka-exporter:
    image: registry.cn-hangzhou.aliyuncs.com/zhangshijie/kafka-exporter:v1.7.0
    container_name: kafka-exporter-container
    volumes:
      - /etc/localtime:/etc/localtime
    ports:
      - "9308:9308"
    restart: always
    links:
      - kafka-server:kafka
    depends_on:
      - zookeeper-server
      - kafka-server
volumes:
  zookeeper_vol:
  kafka_vol:
docker-compose up -d
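Verify that the exporter is serving metrics before wiring up Prometheus (kafka_brokers is a standard kafka-exporter series):
curl -s http://172.31.4.3:9308/metrics | grep kafka_brokers
Then add a scrape job to prometheus.yml: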
- job_name: 'kafka-monitor'
  static_configs:
  - targets: ['172.31.4.3:9308']
root@ubuntu22-04-2:/opt/kafka# docker exec -it kafka-container bash
bash-5.1# cd /opt/kafka/
bash-5.1# ./bin/kafka-topics.sh --create --topic quickstart-events --bootstrap-server localhost:9092
Created topic quickstart-events.
bash-5.1# ./bin/kafka-console-producer.sh --topic quickstart-events --bootstrap-server localhost:9092
>This is my first event
>This is my second event
bash-5.1# ./bin/kafka-console-consumer.sh --topic quickstart-events --from-beginning --bootstrap-server localhost:9092
This is my first event
This is my second event
# docker exec -it kafka-container bash
bash-5.1# ./bin/kafka-console-consumer.sh --topic mytopic --bootstrap-server localhost:9092 --group myserver --from-beginning
bash-5.1# ./bin/kafka-console-producer.sh --topic mytopic --bootstrap-server localhost:9092
bash-5.1# ./bin/kafka-consumer-groups.sh --bootstrap-server localhost:9092 --delete --group myserver
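With topics and the myserver consumer group active, consumer lag can be graphed with a query such as (metric name as exposed by kafka-exporter v1.x):
sum(kafka_consumergroup_lag{consumergroup="myserver"}) by (topic)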
Verify with Grafana dashboard 11285:
4. Use Prometheus blackbox_exporter to monitor domain availability, SSL certificate validity, and port availability, and alert when a certificate has less than 30 days left
blackbox_exporter monitors the network through various probe modules. To use it, configure the monitoring targets and the probe module in Prometheus; Prometheus passes these parameters to blackbox_exporter, which runs the probe and returns the resulting metrics to Prometheus.
View blackbox_exporter's module configuration file: blackbox.yml
root@prometheus-node2:/apps# wget https://github.com/prometheus/blackbox_exporter/releases/download/v0.23.0/blackbox_exporter-0.23.0.linux-amd64.tar.gz
root@prometheus-node2:/apps# tar xvf blackbox_exporter-0.23.0.linux-amd64.tar.gz
root@prometheus-node2:/apps# ln -sv /apps/blackbox_exporter-0.23.0.linux-amd64 /apps/blackbox_exporter
vim /etc/systemd/system/blackbox-exporter.service
[Unit]
Description=Prometheus Blackbox Exporter
After=network.target
[Service]
Type=simple
User=root
Group=root
ExecStart=/apps/blackbox_exporter/blackbox_exporter \
--config.file=/apps/blackbox_exporter/blackbox.yml \
--web.listen-address=:9115
Restart=on-failure
[Install]
WantedBy=multi-user.target
systemctl restart blackbox-exporter.service && systemctl enable blackbox-exporter.service
Verify the web UI:
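A probe can also be triggered manually from the command line to confirm the http_2xx module works (target is illustrative):
curl 'http://192.168.220.200:9115/probe?module=http_2xx&target=https://www.xiaomi.com'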
Implement URL monitoring:
vim /apps/prometheus/prometheus.yml
- job_name: 'http_status'
  metrics_path: /probe
  params:
    module: [http_2xx]
  static_configs:
  - targets: ['http://www.xiaomi.com','http://www.magedu.com']
    labels:
      instance: http_status
      group: web
  relabel_configs:
  - source_labels: [__address__] # copy __address__ (the label holding the target URL) into __param_target, which is passed to blackbox_exporter
    target_label: __param_target # yields key __param_target with value www.xiaomi.com / www.magedu.com
  - source_labels: [__param_target] # optional: add a label with key url whose value comes from __param_target, used to distinguish HTTP targets (URLs) in graphs
    target_label: url # attach the target value as the url label
  - target_label: __address__ # rewrite __address__ to point at the blackbox_exporter server, so the probe request is sent to that blackbox_exporter
    replacement: 192.168.220.200:9115 # blackbox_exporter server address
Check the Prometheus configuration file:
/apps/prometheus/promtool check config /apps/prometheus/prometheus.yml
Configure an alert when an SSL certificate has less than 30 days of validity left:
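The rule itself isn't reproduced in the original; a minimal sketch using blackbox_exporter's probe_ssl_earliest_cert_expiry series (only emitted for HTTPS targets), placed under the rules path used in section 5:
groups:
- name: ssl-cert-rules
  rules:
  - alert: SSLCertExpiringSoon
    expr: probe_ssl_earliest_cert_expiry - time() < 86400 * 30
    for: 10m
    labels:
      severity: warning
    annotations:
      summary: "SSL certificate for {{ $labels.instance }} expires in less than 30 days"
TCP port monitoring: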
- job_name: 'port_status'
  metrics_path: /probe
  params:
    module: [tcp_connect]
  static_configs:
  - targets: ['172.31.2.101:9100','172.31.2.102:9090','172.31.7.101:22']
    labels:
      instance: 'port_status'
      group: 'port'
  relabel_configs:
  - source_labels: [__address__]
    target_label: __param_target
  - source_labels: [__param_target] # used to show the probe target in Grafana
    target_label: ip
  - target_label: __address__
    replacement: 172.31.2.102:9115
ICMP (ping) monitoring:
- job_name: 'ping_status'
  metrics_path: /probe
  params:
    module: [icmp]
  static_configs:
  - targets: ['172.31.0.2',"223.6.6.6"]
    labels:
      instance: 'ping_status'
      group: 'icmp'
  relabel_configs:
  - source_labels: [__address__]
    target_label: __param_target
  - source_labels: [__param_target] # used to show the probe target in Grafana
    target_label: ip
  - target_label: __address__
    replacement: 172.31.2.102:9115
![](https://i-blog.csdnimg.cn/blog_migrate/7056454edef8c281901bb428b921d103.png)
5. Summarize the Prometheus alerting flow, deploy AlertManager, understand Prometheus rule writing, and configure email alerts
Alerting flow: Prometheus evaluates the alerting rules listed under rule_files; alerts that fire are pushed to Alertmanager, which handles grouping, inhibition, silencing, and routing before delivering notifications to receivers (email, webhook, etc.).
Alertmanager configuration (alertmanager.yml):
global:
  resolve_timeout: 1m
  smtp_smarthost: 'smtp.qq.com:465'
  smtp_from: '<QQ mailbox address>'
  smtp_auth_username: '<QQ mailbox username>'
  smtp_auth_password: '<QQ mailbox authorization code>'
  smtp_hello: '@qq.com'
  smtp_require_tls: false
route:
  group_by: ['alertname']
  group_wait: 10s
  group_interval: 10s
  repeat_interval: 2m
  receiver: 'myalertname'
  routes:
  - receiver: 'myalertname' # critical-severity alerts are sent to the leader
    group_wait: 10s
    match_re:
      severity: critical # match critical-severity alerts
  - receiver: 'default-receiver' # host alerts go to the monitoring team
    group_wait: 10s
    match_re:
      severity: warning
receivers:
- name: 'default-receiver'
  email_configs:
  - to: '<notification mailbox address>'
    send_resolved: true # also notify when an alert resolves
- name: 'myalertname'
  webhook_configs:
  - url: 'http://192.168.220.200:8060/dingtalk/alertname/send'
    send_resolved: true # also notify when an alert resolves
inhibit_rules:
- source_match:
    severity: 'critical'
  target_match:
    severity: 'warning'
  equal: ['alertname', 'dev', 'instance']
Prometheus configuration (/apps/prometheus/prometheus.yml):
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).
# Alertmanager configuration
alerting:
  alertmanagers:
  - static_configs:
    - targets:
      - 192.168.220.200:9093
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"
  - "/apps/prometheus/rules/server_rules.yaml"
/apps/prometheus/promtool check rules /apps/prometheus/rules/server_rules.yaml
6. Configure DingTalk and WeCom (Enterprise WeChat) alerting, and customize the alert message template
Configure DingTalk alerting:
./prometheus-webhook-dingtalk --web.listen-address="0.0.0.0:8060" --ding.profile="alertname=https://oapi.dingtalk.com/robot/send?access_token=65cbfef3732b87f78a200973bbbed0d06f73ed38ae0ff04667fa1fec580faa41"
root@dingding-server:/apps# cat /etc/systemd/system/prometheus-webhook-dingtalk.service
[Unit]
Description=Prometheus Webhook DingTalk
Documentation=https://prometheus.io/docs/introduction/overview/
After=network.target
[Service]
Restart=on-failure
WorkingDirectory=/apps/prometheus-webhook-dingtalk/
ExecStart=/apps/prometheus-webhook-dingtalk/prometheus-webhook-dingtalk \
  --web.listen-address="0.0.0.0:8060" \
  --ding.profile="alertname=https://oapi.dingtalk.com/robot/send?access_token=ba76276cd923a4e5dcd653ffabe4b71c4a23e8c4eb8e91446840d527c8d9cd4e"
[Install]
WantedBy=multi-user.target
Edit Alertmanager to send alerts through the DingTalk webhook:
receivers:
- name: 'myalertname'
  webhook_configs:
  - url: 'http://192.168.220.200:8060/dingtalk/alertname/send'
    send_resolved: true # also notify when an alert resolves
Customize the DingTalk alert message template:
root@dingding-server:/apps/prometheus-webhook-dingtalk# vim template1.yaml
{{ define "dingding.to.message1" }}
{{- if gt (len .Alerts.Firing) 0 -}}
{{- range $index, $alert := .Alerts -}}
========= **Monitoring Alert** =========
...
========= = **end** = =========
{{- end }}
{{- end }}
{{- end }}
root@dingding-server:/apps/prometheus-webhook-dingtalk# cp config.example.yml config.yml
root@dingding-server:/apps/prometheus-webhook-dingtalk# vim config.yml
message: # Use legacy template
  text: '{{ template "dingding.to.message1" . }}' # the define name in the template identifies the target template, distinguishing different templates; see https://github.com/timonwong/prometheus-webhook-dingtalk/issues/16
root@dingding-server:/apps/prometheus-webhook-dingtalk# cp template1.yaml template2.yaml
{{ define "dingding.to.message2" }}**告警级别:** {{ $alert.Labels.severity }} 2 级
root@dingding-server:/apps/prometheus-webhook-dingtalk# cp template1.yaml template3.yaml
{{ define "dingding.to.message3" }}**告警级别:** '{{ $alert.Labels.severity }} 3 级
root@dingding-server:/apps/prometheus-webhook-dingtalk-1.4.0.linux-amd64# ./prometheus-webhook-dingtalk --web.listen-address="0.0.0.0:8060" --web.enable-ui --config.file="config.yml"
![](https://i-blog.csdnimg.cn/blog_migrate/0232db94c2d7c0981dabb2f32edcd6d1.png)
7. Configure Prometheus to discover and monitor common Kubernetes service metrics via kube-state-metrics
root@k8s-master1:~/prometheus-case-files# kubectl apply -f case5-kube-state-metrics-deploy.yaml
root@k8s-master1:~/prometheus-case-files# kubectl get pod -n kube-system | grep kube-state-metr
scrape_configs:
- job_name: 'kube-state-metrics'
  static_configs:
  - targets: ['172.31.7.111:31666']
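Once the target is up, common service-level series become queryable; for example (standard kube-state-metrics metric names):
kube_deployment_status_replicas_available
kube_pod_status_phase{phase="Running"}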
![](https://i-blog.csdnimg.cn/blog_migrate/10e8bb659d8f5d8aa3ec7e35d8600bd6.png)
![](https://i-blog.csdnimg.cn/blog_migrate/570ddc8269b08096f18c11bbbb4071d2.png)
8. Kubernetes HPA controller workflow, the Pod scaling mechanism, and automatic pod replica scaling
1. Pod scaling overview
- Based on the current pod load, dynamically adjust the number of pod replicas (Pod1, Pod2, ..., PodN); during business peaks, automatically scale out the replicas so requests are served promptly.
- During off-peak periods, scale the pods back in to cut cost and improve efficiency.
- Public clouds additionally support node-level elastic scaling.
1.2 The pod scaling mechanism
- First, pods are run by a pod controller (Deployment/StatefulSet/ReplicaSet); Deployment is generally recommended.
- Kubernetes keeps the pod count in step with the controller: once the controller spec changes, the pod count changes with it.
- To change the replica count, modify the controller with kubectl scale, which scales the pod replicas.
1.3 Manually adjusting the pod replica count
---- Check the pod replicas and the Deployment controller ----
root@deploy:~# kubectl get pod -n myserver
root@deploy:~# kubectl get deployment -n myserver
---- Command help ----
root@deploy:~# kubectl scale --help
---- Manually modify the Deployment controller ----
root@deploy:~# kubectl scale --replicas=2 deployment tomcat-deployment -n myserver
root@deploy:~# kubectl get deployment -n myserver
---- Check the pod replica count ----
root@deploy:~# kubectl get pod -n myserver
1.4 Types of autoscaling controllers
- Horizontal Pod Autoscaler (HPA): horizontally adjusts the number of pod replicas based on pod resource utilization.
- Vertical Pod Autoscaler (VPA): adjusts the resource limits of individual pods based on their utilization; it cannot be used together with HPA.
- Cluster Autoscaler (CA): dynamically scales the node pool based on cluster-wide node resource usage, so that CPU and memory remain available for scheduling pods.
2. The HPA controller
The HPA controller was introduced in Kubernetes 1.1. Early versions used the Heapster component to collect pod metrics; since Kubernetes 1.11, Metrics Server performs the collection. The collected data is exposed through aggregated APIs (metrics.k8s.io, custom.metrics.k8s.io, external.metrics.k8s.io) and queried by the HPA controller, which scales pods up or down based on the utilization of a given resource.
2.2 HPA controller workflow
- Create an HPA object bound to a target resource such as a Deployment, setting the target CPU utilization and the minimum/maximum replica counts. (Prerequisite: the pods must declare resource requests, otherwise the HPA cannot work.)
- Every 15 seconds (configurable via the controller manager's --horizontal-pod-autoscaler-sync-period flag, default 15s), the HPA controller reads resource usage from the metrics API.
- The controller compares the observed usage with the HPA target and computes the required replica count.
- It adjusts the replica count accordingly, so that per-pod CPU utilization approaches the target without going beyond the configured minimum/maximum.
- Steps 2-4 repeat in a loop.
2.3 Basic HPA parameters
--horizontal-pod-autoscaler-downscale-stabilization # downscale stabilization window, default 5 minutes
--horizontal-pod-autoscaler-sync-period # interval at which the HPA controller syncs pod replica counts, default 15 seconds
--horizontal-pod-autoscaler-cpu-initialization-period # initialization delay during which pod CPU metrics are ignored, default 5 minutes
--horizontal-pod-autoscaler-initial-readiness-delay # pod readiness delay; pods within this window are treated as not ready and their metrics are not collected, default 30 seconds
--horizontal-pod-autoscaler-tolerance # tolerated metric deviation (float, default 0.1): the ratio of the current metric to the target must be above 1+0.1=1.1 or below 1-0.1=0.9 to trigger scaling. For example, with a CPU utilization target of 50% and current usage of 80%, 80/50 = 1.6 > 1.1, so a scale-out is triggered; a ratio below 0.9 triggers a scale-in.
Trigger condition: avg(CurrentPodsConsumption) / Target > 1.1 or < 0.9, i.e. sum the metric over the N pods, average it by pod count, and divide by the target; above 1.1 scale out, below 0.9 scale in.
Formula: TargetNumOfPods = ceil(sum(CurrentPodsCPUUtilization) / Target), where ceil rounds up to a whole number of pods.
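Worked example: with a 50% CPU target and three pods currently at 80%, 90%, and 70%, TargetNumOfPods = ceil((80 + 90 + 70) / 50) = ceil(4.8) = 5 replicas (subject to the configured maxReplicas).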
![](https://i-blog.csdnimg.cn/blog_migrate/25d93f343b1862b3ef554987e9fb2e47.png)
Deploy metrics-server:
metrics-server# kubectl apply -f metrics-server-v0.6.1.yaml
Verify the pod metrics:
![](https://i-blog.csdnimg.cn/blog_migrate/5b6aea902f169a48f86c35526bf06d02.png)
Verify the node metrics:
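For example, once metrics-server is ready, both can be checked from the CLI:
kubectl top pod -A
kubectl top node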
Deploy the HPA:
apiVersion: autoscaling/v2 # API version
kind: HorizontalPodAutoscaler # resource type: HorizontalPodAutoscaler
metadata: # metadata
  namespace: magedu # create in this namespace
  name: magedu-podautoscaler # HPA controller name
  labels: # custom labels
    app: magedu-nginx # label 1
    version: v2 # label 2
spec: # object spec
  scaleTargetRef: # the scale target: Deployment, ReplicationController/ReplicaSet
    kind: Deployment # target type is a Deployment controller
    apiVersion: apps/v1 # Deployment API version
    name: magedu-tomcat-app1-deployment # target Deployment name
  minReplicas: 1 # minimum pod replicas
  maxReplicas: 3 # maximum pod replicas
  metrics: # autoscale pod replicas on the following metrics
  - type: Resource # resource metric
    resource: # metric details
      name: memory # resource name: memory
      target: # target threshold
        type: Utilization # trigger on utilization
        averageUtilization: 50 # average utilization 50%
  - type: Resource # resource metric
    resource: # metric details
      name: cpu # resource name: cpu
      target: # target threshold
        type: Utilization # trigger on utilization
        averageUtilization: 40 # average utilization 40%
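Apply the manifest and watch the HPA status (the file name here is illustrative):
kubectl apply -f hpa.yaml
kubectl get hpa -n magedu
kubectl describe hpa magedu-podautoscaler -n magedu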
![](https://i-blog.csdnimg.cn/blog_migrate/f04ed0d3f0c8860a8d6524f73112e037.png)
![](https://i-blog.csdnimg.cn/blog_migrate/fc7d34b7aa923792ac83da2fb533cb96.png)
View the Deployment details:
![](https://i-blog.csdnimg.cn/blog_migrate/39ab36744d48d75ee9270be84f48aaa3.png)
![](https://i-blog.csdnimg.cn/blog_migrate/0d52f23558e879cc922ba36898406462.png)
![](https://i-blog.csdnimg.cn/blog_migrate/c065572e3071e186508eac187871914f.png)