Kubernetes(六)集群部署—负载均衡
文章目录
搭建完多master节点后,master2能够识别node1和node2,但这两个node节点并不认可master2,所以需要部署负载均衡来统一apiserver入口
一、LB1 LB2操作
安装nginx服务,把nginx.sh和keepalived.conf 脚本拷贝复制到家目录
记得关闭防火墙!
# Stop the firewall and put SELinux in permissive mode so LB/VRRP traffic is not blocked
systemctl stop firewalld
setenforce 0
[root@localhost ~]# vim /etc/yum.repos.d/nginx.repo
# Official nginx yum repository for CentOS 7
[nginx]
name=nginx repo
baseurl=http://nginx.org/packages/centos/7/$basearch/
# NOTE(review): gpgcheck=0 skips package signature verification — acceptable for a lab, not for production
gpgcheck=0
[root@localhost ~]# yum install nginx -y
添加四层转发
[root@localhost ~]# vim /etc/nginx/nginx.conf
events {
worker_connections 1024;
}
# Append the following stream block for layer-4 (TCP) load balancing of the apiservers
stream {
log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
access_log /var/log/nginx/k8s-access.log main;
upstream k8s-apiserver {
server 192.168.171.7:6443; # load-balance to master1's kube-apiserver
server 192.168.171.8:6443; # load-balance to master2's kube-apiserver
}
server {
listen 6443; # listening port (same port kube clients expect)
proxy_pass k8s-apiserver; # forward TCP connections to the upstream group
}
}
[root@localhost ~]# systemctl start nginx
//部署keepalived服务
[root@localhost ~]# yum install keepalived -y
//上传keepalived配置文件
然后修改配置文件
[root@localhost ~]# cp keepalived.conf /etc/keepalived/keepalived.conf
//提示是否覆盖直接yes就行
注意:lb01是Master,配置如下:
vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
! lb01 (MASTER) configuration
global_defs {
    # notification recipients
    notification_email {
        acassen@firewall.loc
        failover@firewall.loc
        sysadmin@firewall.loc
    }
    # notification sender address
    notification_email_from Alexandre.Cassen@firewall.loc
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id NGINX_MASTER
}
# health-check script: stops keepalived when nginx is down, triggering VIP failover
vrrp_script check_nginx {
    script "/etc/nginx/check_nginx.sh"
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 51    # VRRP router ID; must match on master and backup
    priority 100            # priority; the backup server is set to 90
    advert_int 1            # VRRP advertisement interval, default 1 second
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.171.99/24   # floating VIP shared by lb01/lb02
    }
    track_script {
        check_nginx
    }
}
注意:lb02是Backup配置如下:
! Configuration File for keepalived
! lb02 (BACKUP) configuration
global_defs {
    # notification recipients
    notification_email {
        acassen@firewall.loc
        failover@firewall.loc
        sysadmin@firewall.loc
    }
    # notification sender address
    notification_email_from Alexandre.Cassen@firewall.loc
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    # router_id identifies this machine; it must be unique per node
    # (the original copy reused NGINX_MASTER from lb01)
    router_id NGINX_BACKUP
}
# health-check script: stops keepalived when nginx is down, triggering VIP failover
vrrp_script check_nginx {
    script "/etc/nginx/check_nginx.sh"
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 51    # VRRP router ID; must match on master and backup
    priority 90             # lower than the master's 100
    advert_int 1            # VRRP advertisement interval, default 1 second
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.171.99/24   # floating VIP shared by lb01/lb02
    }
    track_script {
        check_nginx
    }
}
vim /etc/nginx/check_nginx.sh
#!/bin/bash
# keepalived health check: if no nginx process is running, stop keepalived
# so the VIP fails over to the backup load balancer.
#
# Uses pgrep instead of the fragile `ps -ef | grep nginx | egrep -cv "grep|$$"`
# pipeline (which had to filter out its own grep and shell PID).
# A shebang is required because keepalived executes this script directly.
count=$(pgrep -c -x nginx)
if [ "$count" -eq 0 ]; then
    systemctl stop keepalived
fi
chmod +x /etc/nginx/check_nginx.sh
systemctl start keepalived
查看lb01地址信息
ip a
漂移地址在lb01中
查看lb02地址信息
ip a
目前应该是没有漂移地址的
验证地址漂移
- 可在lb01中使用 pkill nginx 关闭nginx服务,再去lb02使用 ip a 查看VIP是否漂移过来
- 恢复操作:在lb01中先启动nginx服务,再启动keepalived服务,VIP就会漂移回来(lb01优先级比lb02高)
- nginx站点/usr/share/nginx/html
//开始修改node节点配置文件统一VIP(bootstrap.kubeconfig,kubelet.kubeconfig)
[root@localhost cfg]# vim /opt/kubernetes/cfg/bootstrap.kubeconfig
[root@localhost cfg]# vim /opt/kubernetes/cfg/kubelet.kubeconfig
[root@localhost cfg]# vim /opt/kubernetes/cfg/kube-proxy.kubeconfig
//统统修改为VIP
server: https://192.168.171.99:6443
[root@localhost cfg]# systemctl restart kubelet.service
[root@localhost cfg]# systemctl restart kube-proxy.service
//替换完成直接自检
cd /opt/kubernetes/cfg/
[root@localhost cfg]# grep 99 *
//在lb01上查看nginx的k8s日志
[root@localhost ~]# tail /var/log/nginx/k8s-access.log
注意node1和node2的docker和flannel的IP是否是同一网段
否则都要重启一下
systemctl daemon-reload
systemctl restart docker
二、在master01上操作
//测试创建pod
[root@localhost ~]# kubectl run nginx --image=nginx
//查看状态
[root@localhost ~]# kubectl get pods
注意日志问题
kubectl logs nginx-dbddb74b8-nf9sk
kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous
查看pod网络
kubectl get pods -o wide
在对应网段的node节点上操作可以直接访问
curl 172.17.47.2
访问就会产生日志
回到master01操作
[root@localhost ~]# kubectl logs nginx-dbddb74b8-hrbwg