Environment
System
[root@master-12 ~]# uname -a
Linux master-12 3.10.0-957.el7.x86_64 #1 SMP Thu Nov 8 23:39:32 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
[root@master-12 ~]# cat /etc/redhat-release
CentOS Linux release 7.6.1810 (Core)
Modify /etc/hosts (this must be done on every node)
[root@master-12 ~]# cat /etc/hosts
172.17.0.12 master-12
172.17.0.8 slave-08
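To avoid editing every node by hand, the file can be pushed from the master once it is correct there; a minimal sketch, assuming root SSH access and that slave-08 is the only other node:
for h in slave-08; do scp /etc/hosts root@$h:/etc/hosts; done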
Disable SELinux, the firewall, and swap
sed -ri 's#(SELINUX=).*#\1disabled#' /etc/selinux/config
setenforce 0
systemctl disable firewalld
systemctl stop firewalld
swapoff -a
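Note that swapoff -a only disables swap until the next reboot; to make it permanent, also comment out the swap entry in /etc/fstab. A minimal sketch:
# comment out any uncommented fstab line whose filesystem type is swap
sed -ri 's/^([^#].*\sswap\s.*)$/#\1/' /etc/fstab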
Add kernel parameters
vi /etc/sysctl.conf
net.ipv4.ip_forward=1
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
vm.swappiness=0
# apply the configuration
modprobe br_netfilter
sysctl -p
If the br_netfilter kernel module is not loaded, sysctl -p fails with the following errors:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory
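Likewise, modprobe only loads br_netfilter for the current boot. On CentOS 7, systemd-modules-load reads /etc/modules-load.d/*.conf at startup, so the module can be made persistent like this (the file name is arbitrary):
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf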
Configure IPVS modules
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
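A caveat: running scripts from /etc/sysconfig/modules/ at boot is a RHEL 6 convention that CentOS 7 does not guarantee to honor. If you want these modules loaded automatically after a reboot, a systemd-native sketch is:
cat > /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF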
Configure the yum repository (Aliyun mirror)
cat << EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum makecache -y
Install Docker
# install yum-utils, which provides yum-config-manager
yum -y install yum-utils
# add the Docker CE repository
yum-config-manager \
--add-repo \
https://download.docker.com/linux/centos/docker-ce.repo
# refresh the repo metadata
yum makecache -y
# list available docker-ce versions
yum list docker-ce.x86_64 --showduplicates | sort -r
# the newest release is currently 18.09.x, which Kubernetes 1.14 validates, so installing the latest docker-ce is fine
yum -y install docker-ce
Start Docker
systemctl daemon-reload
systemctl start docker
systemctl enable docker
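Optionally, you can switch Docker to the systemd cgroup driver, which the container-runtimes document referenced at the end recommends; kubeadm 1.14 detects Docker's driver and configures the kubelet to match, so treat this as a sketch rather than a required step:
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl restart docker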
Deploy Kubernetes
# Kubernetes packages (master)
yum -y install kubelet-1.14.1 kubeadm-1.14.1 kubectl-1.14.1
# Kubernetes packages (worker nodes)
yum -y install kubelet-1.14.1 kubeadm-1.14.1
# IPVS userspace tools
yum -y install ipvsadm ipset
# load the IPVS kernel modules using the script created earlier
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack_ipv4
Packages installed
Running transaction
Installing : socat-1.7.3.2-2.el7.x86_64 1/10
Installing : libnetfilter_queue-1.0.2-2.el7_2.x86_64 2/10
Installing : libnetfilter_cttimeout-1.0.0-6.el7.x86_64 3/10
Installing : kubectl-1.14.1-0.x86_64 4/10
Installing : libnetfilter_cthelper-1.0.0-9.el7.x86_64 5/10
Installing : conntrack-tools-1.4.4-4.el7.x86_64 6/10
Installing : kubernetes-cni-0.7.5-0.x86_64 7/10
Installing : kubelet-1.14.1-0.x86_64 8/10
Installing : cri-tools-1.12.0-0.x86_64 9/10
Installing : kubeadm-1.14.1-0.x86_64 10/10
Verifying : cri-tools-1.12.0-0.x86_64 1/10
Verifying : libnetfilter_cthelper-1.0.0-9.el7.x86_64 2/10
Verifying : kubectl-1.14.1-0.x86_64 3/10
Verifying : libnetfilter_cttimeout-1.0.0-6.el7.x86_64 4/10
Verifying : libnetfilter_queue-1.0.2-2.el7_2.x86_64 5/10
Verifying : kubeadm-1.14.1-0.x86_64 6/10
Verifying : kubelet-1.14.1-0.x86_64 7/10
Verifying : kubernetes-cni-0.7.5-0.x86_64 8/10
Verifying : socat-1.7.3.2-2.el7.x86_64 9/10
Verifying : conntrack-tools-1.4.4-4.el7.x86_64 10/10
Enable kubelet at boot (until kubeadm init writes the kubelet configuration, the service restarts in a loop; this is expected)
[root@master-12 ~]# systemctl enable kubelet.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
Adjust the kubeadm configuration
kubeadm config print init-defaults > kubeadm-init.yaml
Compared with the defaults, the file below changes advertiseAddress and the node name, points imageRepository at the Aliyun mirror (so the control-plane images can be pulled without access to k8s.gcr.io), pins kubernetesVersion to v1.14.1, sets podSubnet/serviceSubnet, and appends a KubeProxyConfiguration that switches kube-proxy to IPVS mode.
[root@master-12 ~]# cat kubeadm-init.yaml
apiVersion: kubeadm.k8s.io/v1beta1
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.17.0.12
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: master-12
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta1
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: ""
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.14.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.254.64.0/18"
  serviceSubnet: "10.254.0.0/18"
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
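Before running init, the control-plane images can be pre-pulled with the same config file; this surfaces registry problems early and keeps the init step itself fast:
kubeadm config images pull --config kubeadm-init.yaml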
Initialize the cluster
[root@master-12 ~]# kubeadm init --config kubeadm-init.yaml
[init] Using Kubernetes version: v1.14.1
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [master-12 kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.254.0.1 172.17.0.12]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [master-12 localhost] and IPs [172.17.0.12 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [master-12 localhost] and IPs [172.17.0.12 127.0.0.1 ::1]
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 16.501947 seconds
[upload-config] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.14" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --experimental-upload-certs
[mark-control-plane] Marking the node master-12 as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node master-12 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: abcdef.0123456789abcdef
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 172.17.0.12:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:5dccefeca4c1678cd175a3b23b9967bb00258c310c647bce5f26b0b7bd20f70e
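The bootstrap token above expires after the 24h ttl set in kubeadm-init.yaml. If you join nodes later, generate a fresh join command on the master:
kubeadm token create --print-join-command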
Join a worker node
[root@slave-08 ~]# kubeadm join 172.17.0.12:6443 --token abcdef.0123456789abcdef --discovery-token-ca-cert-hash sha256:5dccefeca4c1678cd175a3b23b9967bb00258c310c647bce5f26b0b7bd20f70e
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.14" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
Install the Calico CNI plugin
wget https://docs.projectcalico.org/v3.7/manifests/calico.yaml
vi calico.yaml
# change the pod IP pool to match podSubnet in kubeadm-init.yaml
- name: CALICO_IPV4POOL_CIDR
  value: "10.254.64.0/18"
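The edit can also be scripted; a sketch, assuming the manifest still carries Calico v3.7's default pool of 192.168.0.0/16:
sed -i 's#192.168.0.0/16#10.254.64.0/18#' calico.yaml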
[root@master-12 ~]# kubectl apply -f calico.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.extensions/calico-node created
serviceaccount/calico-node created
deployment.extensions/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
Check node status
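Once the Calico pods are running, both nodes should report Ready:
kubectl get nodes -o wide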
Check component status
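The classic check for the scheduler, controller-manager, and etcd is kubectl get componentstatuses; all three should report Healthy:
kubectl get cs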
Deployment complete
References
https://kubernetes.io/docs/setup/production-environment/container-runtimes/