Deploying Kubernetes 1.24+ with keepalived + nginx

This article describes in detail how to do the base configuration in a CentOS environment, install and configure Nginx and Keepalived as the load balancer, prepare the K8S cluster environment (including installing containerd and the Calico network), and finally deploy and test Kubernetes services.

I. Base configuration (on all six CentOS hosts)
1. Set the hostnames
hostnamectl set-hostname k8s-master01 && bash
hostnamectl set-hostname k8s-master02 && bash
hostnamectl set-hostname k8s-node1 && bash
hostnamectl set-hostname k8s-node2 && bash
hostnamectl set-hostname k8s-lb01 && bash
hostnamectl set-hostname k8s-lb02 && bash
2. Disable the firewall and SELinux
systemctl stop firewalld && systemctl disable firewalld
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config && setenforce 0
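A quick check that the change took effect:

getenforce    # prints Permissive now (Disabled after a reboot)
grep '^SELINUX=' /etc/selinux/config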
3. Add hosts entries
cat >>/etc/hosts <<EOF
192.168.180.210 k8s-master01
192.168.180.200 k8s-master02
192.168.180.190 k8s-node1
192.168.180.180 k8s-node2
192.168.180.170 k8s-lb01
192.168.180.160 k8s-lb02
192.168.180.100 api.k8s.com
EOF

4. Disable swap
swapoff -a && sed -ri 's/.*swap.*/#&/' /etc/fstab
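Verify that swap is fully off:

free -m | grep -i swap    # all values should be 0
swapon -s                 # should print nothing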

5. Install basic packages
yum install -y vim net-tools lrzsz wget git

II. Install and configure nginx + keepalived (run on both LB nodes)
yum install -y epel-release
yum install -y nginx nginx-mod-stream
vim /etc/nginx/nginx.conf

stream {
    log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
    access_log /var/log/nginx/k8s-access.log main;
    upstream k8s-apiserver {
        server 192.168.180.210:6443;
        server 192.168.180.200:6443;
    }
    server {
        listen 6443;
        proxy_pass k8s-apiserver;
    }
}

systemctl start nginx && systemctl enable nginx
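Note that the stream {} block sits at the top level of /etc/nginx/nginx.conf, alongside (not inside) the http {} block. A quick sanity check that the configuration is valid and the stream proxy is listening:

nginx -t                # configuration syntax check
ss -lntp | grep 6443    # the stream proxy should be listening on 6443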
lb01:
echo "This is Master Server" > /usr/share/nginx/html/index.html
lb02:
echo "This is Backup Server" > /usr/share/nginx/html/index.html

III. Configure keepalived (run on both LB hosts)
yum install -y keepalived
vim /tmp/check_k8s.sh
#!/bin/bash
# If nginx is no longer running, stop keepalived so the VIP fails over to the backup node
nginx_nums=$(ps -ef | grep 'nginx: master' | grep -v grep | wc -l)
if [ "$nginx_nums" -eq 0 ]
then
    echo 'nginx is down'
    systemctl stop keepalived
else
    echo 'nginx is running'
fi

chmod +x /tmp/check_k8s.sh
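You can run the check once by hand to make sure it behaves as expected while nginx is up:

bash /tmp/check_k8s.sh    # should print "nginx is running"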
On lb01:
vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 192.168.200.1
smtp_connect_timeout 30
router_id LVS_DEVEL
vrrp_skip_check_adv_addr
#vrrp_strict
vrrp_garp_interval 0
vrrp_gna_interval 0
}

vrrp_script check_k8s {
    script "/tmp/check_k8s.sh"
    interval 2
    weight 2
}

vrrp_instance VI_1 {
state MASTER
interface ens33
virtual_router_id 51
priority 100
advert_int 1
nopreempt
unicast_src_ip 192.168.180.170
unicast_peer {
192.168.180.160
}
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.180.100
}
track_script {
check_k8s
}
}

systemctl start keepalived && systemctl enable keepalived
ip addr show ens33

On lb02:
vim /etc/keepalived/keepalived.conf

! Configuration File for keepalived
global_defs {
notification_email {
acassen@firewall.loc
failover@firewall.loc
sysadmin@firewall.loc
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 192.168.200.1
smtp_connect_timeout 30
router_id LVS_DEVEL
vrrp_skip_check_adv_addr
#vrrp_strict
vrrp_garp_interval 0
vrrp_gna_interval 0
}
vrrp_script check_k8s {
    script "/tmp/check_k8s.sh"
    interval 2
    weight 2
}
vrrp_instance VI_1 {
state BACKUP
interface ens33
virtual_router_id 51
priority 80
advert_int 1
nopreempt
unicast_src_ip 192.168.180.160
unicast_peer {
192.168.180.170
}
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.180.100
}
track_script {
check_k8s
}
}
systemctl start keepalived && systemctl enable keepalived
ip addr show ens33
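A simple failover test (optional): stop nginx on lb01, let the health-check script stop keepalived, and confirm that the VIP 192.168.180.100 moves to lb02; the test pages created earlier show which node is answering.

systemctl stop nginx                                   # on lb01
ip addr show ens33 | grep 192.168.180.100              # after a few seconds the VIP is gone from lb01
curl http://192.168.180.100                            # from any host: should now return "This is Backup Server"
systemctl start nginx && systemctl start keepalived    # restore lb01 afterwards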

IV. Prepare the K8S cluster environment
1. Load kernel modules and pass bridged IPv4 traffic to iptables
modprobe br_netfilter
cat > /etc/sysctl.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

sysctl -p
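Verify that the values were applied (each should report 1):

sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables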

2. Install IPVS
yum install -y conntrack ntpdate ntp ipvsadm ipset iptables curl sysstat libseccomp wget vim net-tools git
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
lsmod | grep -e ip_vs -e nf_conntrack

3. Install containerd
cat << EOF > /etc/modules-load.d/containerd.conf
overlay
br_netfilter
EOF

modprobe overlay
modprobe br_netfilter
wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y containerd.io docker-ce docker-ce-cli
mkdir /etc/containerd -p
containerd config default > /etc/containerd/config.toml
vim /etc/containerd/config.toml
Change SystemdCgroup = false to SystemdCgroup = true

and change:
sandbox_image = "k8s.gcr.io/pause:3.6"
to:
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6"

systemctl enable containerd && systemctl start containerd

ctr version
runc --version
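Optionally, point crictl (installed later alongside kubeadm as part of cri-tools) at the containerd socket so it can be used for debugging; this is a convenience, not a required step:

cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
EOF
crictl info    # shows containerd runtime status once crictl is installed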

mkdir /etc/docker
vim /etc/docker/daemon.json
{
  "data-root": "/data/docker",
  "registry-mirrors": ["https://01xxgaft.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2"
}
systemctl restart docker && systemctl enable docker
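Confirm that Docker picked up the systemd cgroup driver and the custom data directory:

docker info | grep -iE 'cgroup driver|docker root dir'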

Install K8S (run on all master and node hosts)
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
EOF

yum clean all
yum makecache fast
yum install -y kubectl kubelet kubeadm
systemctl enable kubelet

vim /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"

kubeadm init --control-plane-endpoint “api.k8s.com:6443” --image-repository registry.aliyuncs.com/google_containers --service-cidr=10.140.0.0/16 --pod-network-cidr=10.240.0.0/16
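When kubeadm init finishes it prints follow-up instructions; set up the kubeconfig on master01 so kubectl works (essentially the standard steps printed by kubeadm):

mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get nodes    # master01 appears, NotReady until the network plugin is deployed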

Add master02 (copy the control-plane join command printed by kubeadm init, adjusted to your environment)
On master02:
mkdir -p /etc/kubernetes/pki/etcd
On master01:
ssh-keygen -t rsa
ssh-copy-id root@k8s-master02
scp -rp /etc/kubernetes/pki/ca.* k8s-master02:/etc/kubernetes/pki
scp -rp /etc/kubernetes/pki/sa.* k8s-master02:/etc/kubernetes/pki
scp -rp /etc/kubernetes/pki/front-proxy-ca.* k8s-master02:/etc/kubernetes/pki
scp -rp /etc/kubernetes/pki/etcd/ca.* k8s-master02:/etc/kubernetes/pki/etcd
scp -rp /etc/kubernetes/admin.conf k8s-master02:/etc/kubernetes
kubeadm join api.k8s.com:6443 --token 9sx7xt.1ds4j1tc0xmxgnq0 --discovery-token-ca-cert-hash sha256:6b4b23ddca684559f553ba10cbb504d1204d4655199b115d51650f284b86282d --control-plane

Add the worker nodes; run on node1 and node2 (copy the join command printed by kubeadm init, adjusted to your environment)
kubeadm join api.k8s.com:6443 --token 9sx7xt.1ds4j1tc0xmxgnq0 \
  --discovery-token-ca-cert-hash sha256:6b4b23ddca684559f553ba10cbb504d1204d4655199b115d51650f284b86282d

Deploy the Calico network [master]
wget https://docs.projectcalico.org/v3.18/manifests/calico.yaml --no-check-certificate
vim calico.yaml    # change the CALICO_IPV4POOL_CIDR entry (around line 3673) as follows

- name: CALICO_IPV4POOL_CIDR
  value: "10.240.0.0/16"

kubectl apply -f calico.yaml
kubectl taint nodes --all node-role.kubernetes.io/control-plane:NoSchedule-
kubectl get pod --all-namespaces -o wide
kubectl get node -o wide

Create a test deployment
kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=80 --type=NodePort
kubectl get pod,svc
Use curl against the service's internal IP to check whether the site is reachable.
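For example (a sketch; the ClusterIP and NodePort come from the kubectl get svc output above, and 192.168.180.190 is node1 in this setup):

CLUSTER_IP=$(kubectl get svc nginx -o jsonpath='{.spec.clusterIP}')
NODE_PORT=$(kubectl get svc nginx -o jsonpath='{.spec.ports[0].nodePort}')
curl http://${CLUSTER_IP}                     # from inside the cluster
curl http://192.168.180.190:${NODE_PORT}      # via node1's NodePort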
On lb01, run:
tail -f /var/log/nginx/k8s-access.log
and watch whether K8S API requests appear in the log.
