一、安装准备
-
硬件资源:
服务器: 4台 8核16G
文档:
-
https://rancher.com/docs/rancher/v2.x/en/installation/
-
https://rancher.com/docs/rke/latest/en/config-options/
初始化安装(所有节点操作)
1、替换yum源
# Back up the existing repo files and switch to the Aliyun mirror.
# NOTE: the correct directory is /etc/yum.repos.d (the original had the
# typo "yum.repo.d", which makes the mv fail and leaves the old repos active).
mkdir -p /root/repo-old && mv /etc/yum.repos.d/* /root/repo-old
wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo
# Rebuild the yum metadata cache against the new mirror
yum clean all && yum makecache
yum update
2、系统初始化(关闭SELinUX与防火墙、内核参数、swap、IPVS模块)
# Disable SELinux permanently (config file) and immediately (runtime)
sed -i '/^SELINUX/s/enforcing/disabled/' /etc/selinux/config
setenforce 0
# Enable IP forwarding and bridge netfilter so pod traffic traverses iptables
cat >> /etc/sysctl.conf << END
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
END
# br_netfilter must be loaded before the bridge-nf sysctls can apply
modprobe br_netfilter
sysctl -p
# Set a unique hostname on each node (master01 / master02 / master03 / work01)
hostnamectl set-hostname master01
# Disable swap now and on reboot (required by kubelet)
swapoff -a
sed -i '/swap/s/^/#/g' /etc/fstab
mount -a
# Persist cgroup memory/swap accounting in GRUB and regenerate the boot config.
# (The original line was a bare shell variable assignment with no effect; the
# setting must live in /etc/default/grub. The file is shell-sourced, so the
# appended assignment takes precedence. A reboot is needed for it to apply.)
echo 'GRUB_CMDLINE_LINUX_DEFAULT="cgroup_enable=memory swapaccount=1"' >> /etc/default/grub
grub2-mkconfig -o /boot/grub2/grub.cfg
# Load the IPVS kernel modules used by kube-proxy's IPVS mode on every boot
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
yum install ipset ipvsadm -y
# Stop and disable firewalld (RKE/Kubernetes manage their own iptables rules)
systemctl stop firewalld && systemctl disable firewalld
3、安装docker
# Install prerequisites for the docker-ce repository
yum install -y yum-utils device-mapper-persistent-data lvm2
# Add the official Docker CE repo, then rewrite it to use the Tsinghua mirror
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
sed -i 's+download.docker.com+mirrors.tuna.tsinghua.edu.cn/docker-ce+' /etc/yum.repos.d/docker-ce.repo
# List all available docker-ce versions, newest first
yum list docker-ce.x86_64 --showduplicates | sort -r
# Pin Docker 19.03.15 (a release validated with this RKE/Kubernetes version)
yum install docker-ce-19.03.15-3.el7
systemctl start docker && systemctl enable docker
4、创建用户
# Create the ops user that RKE will SSH in as on every node
useradd ops
# Append ops to the docker group so it can talk to the Docker socket.
# -a is required together with -G: plain "usermod -G docker" would REPLACE
# the user's existing supplementary groups instead of adding one.
usermod -aG docker ops
# Set a password for the ops user (needed for the initial ssh-copy-id step)
passwd ops
二、安装步骤
以下仅在master01上操作
1、设置免密登录
# Switch to the ops user; lines prefixed with $ below run as ops
su ops
# Generate an RSA key pair (accept the default path ~/.ssh/id_rsa)
$ ssh-keygen -t rsa
# Copy the public key to every other node for passwordless SSH
$ ssh-copy-id -i ops@master02
$ ssh-copy-id -i ops@master03
$ ssh-copy-id -i ops@work01
# Verify passwordless login works, then return to the original shell
$ ssh ops@work01
$ exit
2、安装rke
# Install the pre-downloaded RKE binary. It must be renamed to "rke" and made
# executable — moving it as "rke_linux-amd64" would leave no "rke" command,
# and "rke version" below would fail.
mv rke_linux-amd64 /usr/local/bin/rke
chmod +x /usr/local/bin/rke
rke version
cd /home/ops
# Generate an empty cluster.yml template as the ops user
su - ops
$ rke config --empty --name cluster.yml
编辑配置文件
# If you intend to deploy Kubernetes in an air-gapped environment,
# please consult the documentation on how to configure custom RKE images.
# NOTE(review): the indentation below was reconstructed to the canonical
# layout produced by `rke config`; all values are preserved from the original.
nodes:
- address: 192.168.0.215
  port: "22"
  internal_address: ""
  role:
  - controlplane
  - etcd
  hostname_override: ""
  user: ops
  docker_socket: /var/run/docker.sock
  ssh_key: ""
  ssh_key_path: ~/.ssh/id_rsa
  ssh_cert: ""
  ssh_cert_path: ""
  labels: {}
  taints: []
- address: 192.168.0.216
  port: "22"
  internal_address: ""
  role:
  - controlplane
  - etcd
  hostname_override: ""
  user: ops
  docker_socket: /var/run/docker.sock
  ssh_key: ""
  ssh_key_path: ~/.ssh/id_rsa
  ssh_cert: ""
  ssh_cert_path: ""
  labels: {}
  taints: []
- address: 192.168.0.206
  port: "22"
  internal_address: ""
  role:
  - controlplane
  - etcd
  hostname_override: ""
  user: ops
  docker_socket: /var/run/docker.sock
  ssh_key: ""
  ssh_key_path: ~/.ssh/id_rsa
  ssh_cert: ""
  ssh_cert_path: ""
  labels: {}
  taints: []
- address: 192.168.0.35
  port: "22"
  internal_address: ""
  role:
  - worker
  hostname_override: ""
  user: ops
  docker_socket: /var/run/docker.sock
  ssh_key: ""
  ssh_key_path: ~/.ssh/id_rsa
  ssh_cert: ""
  ssh_cert_path: ""
  labels: {}
  taints: []
services:
  etcd:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    win_extra_args: {}
    win_extra_binds: []
    win_extra_env: []
    external_urls: []
    ca_cert: ""
    cert: ""
    key: ""
    path: ""
    uid: 0
    gid: 0
    snapshot: null
    retention: ""
    creation: ""
    backup_config: null
  kube-api:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    win_extra_args: {}
    win_extra_binds: []
    win_extra_env: []
    service_cluster_ip_range: 10.43.0.0/16
    service_node_port_range: ""
    pod_security_policy: false
    always_pull_images: false
    secrets_encryption_config: null
    audit_log: null
    admission_configuration: null
    event_rate_limit: null
  kube-controller:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    win_extra_args: {}
    win_extra_binds: []
    win_extra_env: []
    cluster_cidr: 10.42.0.0/16
    service_cluster_ip_range: 10.43.0.0/16
  scheduler:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    win_extra_args: {}
    win_extra_binds: []
    win_extra_env: []
  kubelet:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    win_extra_args: {}
    win_extra_binds: []
    win_extra_env: []
    cluster_domain: cluster.local
    infra_container_image: ""
    cluster_dns_server: 10.43.0.10
    fail_swap_on: false
    generate_serving_certificate: false
  kubeproxy:
    image: ""
    extra_args: {}
    extra_binds: []
    extra_env: []
    win_extra_args: {}
    win_extra_binds: []
    win_extra_env: []
network:
  plugin: calico
  options: {}
  mtu: 0
  node_selector: {}
  update_strategy: null
  tolerations: []
authentication:
  strategy: x509
  sans: []
  webhook: null
addons: ""
addons_include: []
system_images:
  etcd: rancher/coreos-etcd:v3.4.14-rancher1
  alpine: rancher/rke-tools:v0.1.71
  nginx_proxy: rancher/rke-tools:v0.1.71
  cert_downloader: rancher/rke-tools:v0.1.71
  kubernetes_services_sidecar: rancher/rke-tools:v0.1.71
  kubedns: rancher/k8s-dns-kube-dns:1.15.10
  dnsmasq: rancher/k8s-dns-dnsmasq-nanny:1.15.10
  kubedns_sidecar: rancher/k8s-dns-sidecar:1.15.10
  kubedns_autoscaler: rancher/cluster-proportional-autoscaler:1.8.1
  coredns: rancher/coredns-coredns:1.8.0
  coredns_autoscaler: rancher/cluster-proportional-autoscaler:1.8.1
  nodelocal: rancher/k8s-dns-node-cache:1.15.13
  kubernetes: rancher/hyperkube:v1.20.2-rancher2
  flannel: rancher/coreos-flannel:v0.13.0-rancher1
  flannel_cni: rancher/flannel-cni:v0.3.0-rancher6
  calico_node: rancher/calico-node:v3.17.1
  calico_cni: rancher/calico-cni:v3.17.1
  calico_controllers: rancher/calico-kube-controllers:v3.17.1
  calico_ctl: rancher/calico-ctl:v3.17.1
  calico_flexvol: rancher/calico-pod2daemon-flexvol:v3.17.1
  canal_node: rancher/calico-node:v3.17.1
  canal_cni: rancher/calico-cni:v3.17.1
  canal_controllers: rancher/calico-kube-controllers:v3.17.1
  canal_flannel: rancher/coreos-flannel:v0.13.0-rancher1
  canal_flexvol: rancher/calico-pod2daemon-flexvol:v3.17.1
  weave_node: weaveworks/weave-kube:2.8.1
  weave_cni: weaveworks/weave-npc:2.8.1
  pod_infra_container: rancher/pause:3.2
  ingress: rancher/nginx-ingress-controller:nginx-0.43.0-rancher1
  ingress_backend: rancher/nginx-ingress-controller-defaultbackend:1.5-rancher1
  metrics_server: rancher/metrics-server:v0.4.1
  windows_pod_infra_container: rancher/kubelet-pause:v0.1.4
  aci_cni_deploy_container: noiro/cnideploy:5.1.1.0.1ae238a
  aci_host_container: noiro/aci-containers-host:5.1.1.0.1ae238a
  aci_opflex_container: noiro/opflex:5.1.1.0.1ae238a
  aci_mcast_container: noiro/opflex:5.1.1.0.1ae238a
  aci_ovs_container: noiro/openvswitch:5.1.1.0.1ae238a
  aci_controller_container: noiro/aci-containers-controller:5.1.1.0.1ae238a
  aci_gbp_server_container: noiro/gbp-server:5.1.1.0.1ae238a
  aci_opflex_server_container: noiro/opflex-server:5.1.1.0.1ae238a
ssh_key_path: ~/.ssh/id_rsa
ssh_cert_path: ""
ssh_agent_auth: false
authorization:
  mode: rbac
  options: {}
ignore_docker_version: null
kubernetes_version: ""
private_registries: []
ingress:
  provider: ""
  options: {}
  node_selector: {}
  extra_args: {}
  dns_policy: ""
  extra_envs: []
  extra_volumes: []
  extra_volume_mounts: []
  update_strategy: null
  http_port: 0
  https_port: 0
  network_mode: ""
  tolerations: []
  default_backend: null
cluster_name: ""
cloud_provider:
  name: ""
prefix_path: ""
win_prefix_path: ""
addon_job_timeout: 0
bastion_host:
  address: ""
  port: ""
  user: ""
  ssh_key: ""
  ssh_key_path: ""
  ssh_cert: ""
  ssh_cert_path: ""
monitoring:
  provider: ""
  options: {}
  node_selector: {}
  update_strategy: null
  replicas: null
  tolerations: []
restore:
  restore: false
  snapshot_name: ""
rotate_encryption_key: false
dns: null
rke up
# Add the upstream Kubernetes yum repo and install kubectl on master01
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
yum install -y kubectl
# -p: do not fail if /root/.kube already exists (plain mkdir errors on rerun)
mkdir -p /root/.kube/
# RKE writes the kubeconfig next to cluster.yml after a successful "rke up"
cp /home/ops/kube_config_cluster.yml /root/.kube/config
kubectl get node -o wide
3、安装helm
# Unpack the pre-downloaded Helm v3 release tarball and install the binary
tar xf helm-v3.4.0-linux-amd64.tar.gz
mv ./linux-amd64/helm /usr/local/bin
helm version
4、安装Rancher
helm repo add rancher-stable http://rancher-mirror.oss-cn-beijing.aliyuncs.com/server-charts/stable
为Rancher创建Namespace
kubectl create namespace cattle-system
选择默认的SSL选项,使用Rancher生成的自签名证书
Rancher 生成的自签名证书 ingress.tls.source=rancher
安装cert-manager
# 安装 CustomResourceDefinition 资源
kubectl apply --validate=false \
-f https://github.com/jetstack/cert-manager/releases/download/v0.15.0/cert-manager.crds.yaml
# 为 cert-manager 创建命名空间
kubectl create namespace cert-manager
# 添加 Jetstack Helm 仓库
helm repo add jetstack https://charts.jetstack.io
# 更新本地 Helm chart 仓库缓存
helm repo update
# 安装 cert-manager Helm chart
helm install \
cert-manager jetstack/cert-manager \
--namespace cert-manager \
--version v0.15.0
验证部署情况
kubectl get pods --namespace cert-manager
根据您选择的 SSL 选项,通过 Helm 安装 Rancher
helm install rancher rancher-stable/rancher \
--namespace cattle-system \
--set hostname=rancher.my.org
等待Rancher运行
kubectl -n cattle-system rollout status deploy/rancher
验证 Rancher Server 是否已成功部署
kubectl -n cattle-system get deploy rancher
注意 :
请保存您使用的全部 --set 选项。使用 Helm 升级 Rancher 到新版本时,您将需要使用相同的选项
至此安装完成
三、备份和恢复
参考文档地址
https://docs.rancher.cn/docs/rancher2/backups/2.5/back-up-rancher/_index
四、流水线构建
五、优化