高可用master整理
就是为了怕down机,准备一个vip
准备工作必须要到位
1、关闭swap
2、关闭selinux
3、firewalld
4、hosts
5、nfs
6、harbor
7、路由转发内核那几个参数
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
8、其他
# Enable bash tab-completion for kubectl subcommands and resource names
yum install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc
# vim: render a tab as 2 columns (handy when editing YAML)
echo 'set tabstop=2' >> ~/.vimrc
source ~/.vimrc
haproxy
这个其实我觉得可以没有我也是看几个大佬用haproxy
我也就用了
先安装
yum install -y haproxy
最好把配置文件给备份一份
cp /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak
改配置文件
#---------------------------------------------------------------------
# Example configuration for a possible web application. See the
# full configuration options online.
#
# http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
#
#---------------------------------------------------------------------
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
# to have these messages end up in /var/log/haproxy.log you will
# need to:
#
# 1) configure syslog to accept network log events. This is done
# by adding the '-r' option to the SYSLOGD_OPTIONS in
# /etc/sysconfig/syslog
#
# 2) configure local2 events to go to the /var/log/haproxy.log
# file. A line like the following can be added to
# /etc/sysconfig/syslog
#
# local2.* /var/log/haproxy.log
#
log 127.0.0.1 local2
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon
# turn on stats unix socket
stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
mode http
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 10s
timeout check 10s
maxconn 3000
#---------------------------------------------------------------------
# kubernetes apiserver frontend which proxys to the backends
#---------------------------------------------------------------------
# Layer-4 (TCP) passthrough of the Kubernetes API: clients hit <VIP>:16443
# and haproxy forwards the raw TCP stream to one of the kube-apiservers
# on 6443. TLS is terminated by the apiserver itself, not by haproxy.
frontend kubernetes-apiserver
mode tcp
bind *:16443
option tcplog
default_backend kubernetes-apiserver
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend kubernetes-apiserver
mode tcp
balance roundrobin
# "check" enables active health checks so a dead apiserver is pulled
# out of rotation automatically.
server k8s1 110.120.119.81:6443 check
server k8s2 110.120.119.82:6443 check
#---------------------------------------------------------------------
# collection haproxy statistics message
#---------------------------------------------------------------------
# Built-in statistics web page on port 1080; custom URI /admin?stats
# (haproxy's default would be /haproxy?stats).
listen stats
bind *:1080
stats auth admin:awesomePassword
stats refresh 5s
stats realm HAProxy\ Statistics
stats uri /admin?stats
stats uri 没看错,就是这么写的。默认值为 /haproxy?stats,这里自定义了统计页面的URL为 /admin?stats
master节点的配置文件相同
scp /etc/haproxy/haproxy.cfg 110.120.119.82:/etc/haproxy/haproxy.cfg
[root@k8s1 data]# systemctl enable haproxy
Created symlink from /etc/systemd/system/multi-user.target.wants/haproxy.service to /usr/lib/systemd/system/haproxy.service.
[root@k8s1 data]# systemctl start haproxy
一些参数说明
这个端口很重要
frontend kubernetes-apiserver
mode tcp
bind *:16443
这个是绑定地址和端口
server k8s1 110.120.119.81:6443 check
server k8s2 110.120.119.82:6443 check
keepalived
这个是主要的
先安装
yum install -y keepalived
[root@k8s1 data]# systemctl enable keepalived.service
主节点配置文件
! Configuration File for keepalived (MASTER node)
global_defs {
    router_id LVS_DEVEL
}
# Health check for haproxy. "killall -0 haproxy" sends signal 0, which only
# tests whether a haproxy process exists (exit 0) without killing anything.
# NOTE: the original "kellall -o haproxy" is a typo -- that command always
# fails, so the check would keep decrementing priority even while haproxy
# is healthy, and failover would never track haproxy's real state.
vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3        # run the check every 3 seconds
    weight -2         # subtract 2 from priority while the check is failing
    fall 10           # 10 consecutive failures -> mark FAULT
    rise 2            # 2 consecutive successes -> mark OK again
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33            # NIC that will carry the VIP -- adjust per host
    virtual_router_id 51       # must match on every node of this VRRP group
    priority 100               # highest priority holds the VIP
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        110.120.119.99
    }
    track_script {
        check_haproxy
    }
}
从节点们
[root@k8s2 data]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived (BACKUP node)
global_defs {
    router_id LVS_DEVEL
}
# Health check for haproxy. "killall -0 haproxy" sends signal 0: it only
# verifies a haproxy process exists, killing nothing. The original
# "kellall -o haproxy" is a typo and always fails, breaking failover tracking.
vrrp_script check_haproxy {
    script "killall -0 haproxy"
    interval 3        # run the check every 3 seconds
    weight -2         # subtract 2 from priority while the check is failing
    fall 10           # 10 consecutive failures -> mark FAULT
    rise 2            # 2 consecutive successes -> mark OK again
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33            # NIC that will carry the VIP -- adjust per host
    virtual_router_id 51       # must match the MASTER's virtual_router_id
    priority 90                # lower than MASTER (100) so it only takes over on failure
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        110.120.119.99
    }
    track_script {
        check_haproxy
    }
}
参数说明
这里指的vip虚拟绑定的地址
virtual_ipaddress {
110.120.119.99
}
state BACKUP
网卡
interface ens33
virtual_router_id 51
权重
priority 90
advert_int 1
这里说一下:如果两台机器同时都抢到了vip地址(脑裂),说明防火墙等可能阻拦了VRRP组播通信。注意VRRP不是端口,而是IP协议号112,需要在防火墙上放行vrrp协议(例如 firewall-cmd 放行 protocol vrrp)再试一下。
kubeadm安装方式
目前收集的方式有很多种,但是keepalive必须成功
不管你用哪种方式,kuber的镜像要下载完成
kubeadm config images list
查看所需的镜像
你可能无法下载没错的,你肯定下载不下来的。你需要曲线救国用以下脚本以1.18.8为例,后边保存的语句可以注释
#!/usr/bin/env bash
# Pull the Kubernetes v1.18.8 control-plane images from a Docker Hub mirror,
# re-tag them as k8s.gcr.io/* so kubeadm finds them locally, and export each
# image to a .tar for offline distribution to the other nodes.
# Versions must match the output of `kubeadm config images list`.
set -euo pipefail

readonly KUBE_VERSION=v1.18.8
readonly MY_REGISTRY=aiotceo   # Docker Hub mirror of the official k8s.gcr.io images

# name:tag pairs required by kubeadm for this Kubernetes version
images=(
  "kube-apiserver:${KUBE_VERSION}"
  "kube-controller-manager:${KUBE_VERSION}"
  "kube-scheduler:${KUBE_VERSION}"
  "kube-proxy:${KUBE_VERSION}"
  "pause:3.2"
  "etcd:3.4.3-0"
  "coredns:1.6.7"
)

echo ""
echo "=========================================================="
echo "Pull Kubernetes ${KUBE_VERSION} images from ${MY_REGISTRY} ..."
echo "=========================================================="
echo ""

for img in "${images[@]}"; do
  docker pull "${MY_REGISTRY}/${img}"
  docker tag "${MY_REGISTRY}/${img}" "k8s.gcr.io/${img}"
  # Save as <name>-<tag>.tar; comment out this line if offline copies
  # are not needed.
  docker save -o "${img%%:*}-${img##*:}.tar" "k8s.gcr.io/${img}"
done

echo ""
echo "=========================================================="
echo "Pull Kubernetes ${KUBE_VERSION} images FINISHED."
echo "=========================================================="
echo ""
弄完了就算是告一段落了
for i in `ls`; do docker input -i $i ;done
第一种方法,生成kubeadm config
先生成一个kubeadm config的配置文件额
kubeadm config printinit-defaults > kubeadm-config.yaml
生成的yaml文件打开这个样子的需要改一些参数
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: abcdef.0123456789abcdef
ttl: 24h0m0s
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
# This node's own IP address and apiserver port
advertiseAddress: 110.120.119.81
bindPort: 6443
nodeRegistration:
criSocket: /var/run/dockershim.sock
name: k8s1
taints:
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
apiServer:
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
# controlPlaneEndpoint is added by hand: it is the keepalived VIP plus the
# haproxy frontend port (16443), i.e. the HA entry point of the cluster
controlPlaneEndpoint: "110.120.119.99:16443"
controllerManager: {}
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
# Version number: adjust it -- the generated default may not match the
# kubelet/kubeadm version actually installed
kubernetesVersion: v1.18.8
networking:
dnsDomain: cluster.local
serviceSubnet: 10.96.0.0/12
scheduler: {}
有yaml文件了之后,就可以进行初始化了,这里就不说初始化的手续了
直接初始化
# Initialize the first control-plane node from the edited config.
# Fix: the flag is --upload-certs ("--upload-crets" is a typo and kubeadm
# would reject it as an unknown flag).
kubeadm init --config=kubeadm-config.yaml \
  --ignore-preflight-errors=all \
  --upload-certs --v=6 | tee kubeadm-init.log
##########
说明一下参数
--config=kubeadm-config.yaml 制定生成的配置的文件推荐在生成的目录下
--ignore-preflight-errors=all 忽略错误,一般是swap,但是如果你生成过一次就是需要将参数改成all
--upload-certs 这个参数上传证书到kubeadm-certs
--v=6 日志级别据说是比较高的级别才会显示 我一般没注意这事儿
可以添加的参数
--pod-network-cidr=10.244.0.0/16 嗯
--image-repository registry.aliyuncs.com/google_containers 这个是选择初始化的镜像仓库貌似是1.17之后才有的?1.15没听说过。总之你上边的曲线救国做好了这个就不重要了
然后会生成两个命令
mkdir 那三个命令就不说了
挺长的kubeadm 就是第一个是用来加入master节点的
最下边的是用来加入node节点的 命令行少的那个。
在节点处执行即可加入 记得执行mkdir那三行命令否则无法使用kubectl命令因为无法连接服务器
最后
systemctl enable kubelet
第二种方式,比较简单
都是借鉴别人的,太多了就不一一感谢了,多谢你们这些巨人
那么开始:
一条命令搞定,
# One-shot init without a config file.
# Fix: the flag is --apiserver-advertise-address (singular);
# "--api-advertise-addresses" is a long-removed pre-1.x spelling that
# current kubeadm rejects as an unknown flag.
kubeadm init --kubernetes-version=v1.18.8 \
  --pod-network-cidr=10.244.0.0/16 \
  --service-cidr=10.96.0.0/12 \
  --ignore-preflight-errors=all \
  --apiserver-advertise-address=110.120.119.81 \
  --apiserver-bind-port=6443 \
  --control-plane-endpoint=110.120.119.99:16443 \
  --upload-certs
#--control-plane-endpoint=110.120.119.99:16443这个命令是关键
第三种方式
这个是copy的方式,就是借助etcd,然后用 scp -r 110.120.119.81:/etc/kubernetes/* /etc/kubernetes/(注意源地址要写完整的主节点IP)
拷贝过去通过锁实现强制确认,没研究透。先放着
二进制。这个其实比较简单了。
不管用什么方法都需要使用vip。
那么我就开始吧
首先下载kuber的包是吧
https://kubernetes.io/docs/setup/release/notes/
https://github.com/kubernetes/kubernetes/releases/tag/v1.18.13