部署k8s集群——使用kubeadm方式

部署k8s集群–使用kubeadm方式


安装环境

Kubernetes有多种部署方式,目前主流的方式有kubeadm、minikube、二进制包
1、Minikube:一个用于快速搭建单节点kubernetes的工具
2、Kubeadm:一个用于快速搭建kubernetes集群的工具,https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm/
3、二进制包:从官网下载每个组件的二进制包,依次去安装,此方式对于理解kubernetes组件更加有效,https://github.com/kubernetes/kubernetes
说明:现在需要安装kubernetes的集群环境,但是又不想过于麻烦,所以选择使用kubeadm方式

主机规划

角色      IP地址            组件
master   192.168.17.128    docker,kubectl,kubeadm,kubelet
node1    192.168.17.135    docker,kubectl,kubeadm,kubelet
node2    192.168.17.136    docker,kubectl,kubeadm,kubelet

环境搭建

[root@master ~]# vim /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.17.128 master.example.com master
192.168.17.135 node1.example.com node1
192.168.17.136 node2.example.com  node2

时钟同步
kubernetes要求集群中的节点时间必须精确一致,这里使用chronyd服务从网络同步时间
企业中建议配置内部的时间同步服务器

Master:
[root@master ~]# vim /etc/chrony.conf
在配置文件中添加如下内容(注意:这是写入配置文件的内容,不是在命令行执行的命令):
local stratum 10
allow 192.168.17.0/24

[root@master ~]# systemctl restart chronyd
[root@master ~]# systemctl enable chronyd
[root@master ~]# hwclock  -w

Node1和node2:
[root@node1 ~]# vim /etc/chrony.conf
注释掉原有的时间源,添加如下内容(写入配置文件,指向master同步时间):
server master.example.com iburst
[root@node1 ~]# systemctl restart chronyd
[root@node1 ~]# systemctl enable chronyd
[root@node1 ~]# hwclock  -w

[root@node2 ~]# vim /etc/chrony.conf
注释掉原有的时间源,添加如下内容(写入配置文件,指向master同步时间):
server master.example.com iburst
[root@node2 ~]# systemctl restart chronyd
[root@node2 ~]# systemctl enable chronyd
[root@node2 ~]# hwclock  -w

禁用firewalld、selinux、postfix(三个节点都做)
关闭防火墙、selinux,postfix----3台主机都配置

[root@master ~]# systemctl stop firewalld
[root@master ~]# systemctl disable firewalld
[root@master ~]# vim /etc/selinux/config
SELINUX=disabled
[root@master ~]# setenforce 0     #临时切换为宽容模式立即生效;配置文件的修改需重启后永久生效(node1、node2同样操作)
[root@master ~]# systemctl stop postfix
[root@master ~]# systemctl disable postfix

[root@node1 ~]# systemctl stop firewalld
[root@node1 ~]# systemctl disable firewalld
[root@node1 ~]# vim /etc/selinux/config
SELINUX=disabled
[root@node1 ~]# systemctl stop postfix
[root@node1 ~]# systemctl disable postfix


[root@node2 ~]# systemctl stop firewalld
[root@node2 ~]# systemctl disable firewalld
[root@node2 ~]# vim /etc/selinux/config
SELINUX=disabled
[root@node2 ~]# systemctl stop postfix
[root@node2 ~]# systemctl disable postfix

禁用swap分区

[root@master ~]# vim /etc/fstab
注释掉swap分区那一行
#/dev/mapper/cs-swap     none                    swap    defaults        0 0
[root@master ~]# swapoff  -a

[root@node1 ~]# vim /etc/fstab
注释掉swap分区那一行
#/dev/mapper/cs-swap     none                    swap    defaults        0 0
[root@node1 ~]# swapoff  -a

[root@node2 ~]# vim /etc/fstab
注释掉swap分区那一行
#/dev/mapper/cs-swap     none                    swap    defaults        0 0
[root@node2 ~]# swapoff  -a

开启IP转发,和修改内核信息

[root@master ~]# vim /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
[root@master ~]# modprobe   br_netfilter
[root@master ~]# sysctl -p  /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1

[root@node1 ~]# vim /etc/sysctl.d/k8s.conf      #写入与master相同的三行内容
[root@node1 ~]# modprobe   br_netfilter
[root@node1 ~]# sysctl -p  /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1

[root@node2 ~]# vim /etc/sysctl.d/k8s.conf      #写入与master相同的三行内容
[root@node2 ~]# modprobe   br_netfilter
[root@node2 ~]# sysctl -p  /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1


配置IPVS功能

先创建模块加载脚本:
[root@master ~]# vim /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
[root@master ~]# chmod +x /etc/sysconfig/modules/ipvs.modules
[root@master ~]# bash /etc/sysconfig/modules/ipvs.modules
modprobe: FATAL: Module ip_vs_sh#!/bin/bash not found in directory /lib/modules/4.18.0-257.el8.x86_64
说明:出现上述报错是因为脚本中两行内容粘连在一起(缺少换行),在脚本中补上换行后重新执行即可。
[root@master ~]# lsmod | grep -e ip_vs 
ip_vs_sh               16384  0
ip_vs_wrr              16384  0
ip_vs_rr               16384  0
ip_vs                 172032  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          172032  1 ip_vs
nf_defrag_ipv6         20480  2 nf_conntrack,ip_vs
libcrc32c              16384  3 nf_conntrack,xfs,ip_vs
[root@master ~]# reboot

安装docker

切换镜像源

[root@master ~]# wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-vault-8.5.2111.repo
[root@master ~]# dnf -y install epel-release
[root@master ~]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

[root@node1 ~]# wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-vault-8.5.2111.repo
[root@node1 ~]# dnf -y install epel-release
[root@node1 ~]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

[root@node2 ~]# wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-vault-8.5.2111.repo
[root@node2 ~]# dnf -y install epel-release
[root@node2 ~]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

安装docker-ce

[root@master ~]# dnf -y install docker-ce --allowerasing
[root@master ~]# systemctl restart docker
[root@master ~]# systemctl enable docker

添加一个配置文件,配置docker仓库加速器

[root@master ~]# cat > /etc/docker/daemon.json << EOF
> {
>   "registry-mirrors": ["https://14lrk6zd.mirror.aliyuncs.com"],
>   "exec-opts": ["native.cgroupdriver=systemd"],
>   "log-driver": "json-file",
>   "log-opts": {
>     "max-size": "100m"
>   },
>   "storage-driver": "overlay2"
> }
> EOF
注意:结束符 EOF 必须顶格书写,前面不能有空格,否则heredoc不会结束。
[root@master ~]# systemctl daemon-reload
[root@master ~]# systemctl restart docker

[root@node1 ~]# cat > /etc/docker/daemon.json << EOF
> {
>   "registry-mirrors": ["https://14lrk6zd.mirror.aliyuncs.com"],
>   "exec-opts": ["native.cgroupdriver=systemd"],
>   "log-driver": "json-file",
>   "log-opts": {
>     "max-size": "100m"
>   },
>   "storage-driver": "overlay2"
> }
> EOF
[root@node1 ~]# systemctl daemon-reload
[root@node1 ~]# systemctl restart docker


[root@node2 ~]# cat > /etc/docker/daemon.json << EOF
> {
>   "registry-mirrors": ["https://14lrk6zd.mirror.aliyuncs.com"],
>   "exec-opts": ["native.cgroupdriver=systemd"],
>   "log-driver": "json-file",
>   "log-opts": {
>     "max-size": "100m"
>   },
>   "storage-driver": "overlay2"
> }
> EOF
[root@node2 ~]# systemctl daemon-reload
[root@node2 ~]# systemctl restart docker


安装kubernetes组件

由于kubernetes的镜像在国外,速度比较慢,这里切换成国内的镜像源

[root@master ~]# cd /etc/yum.repos.d/
[root@master yum.repos.d]# vim kubernetes.repo
[root@master yum.repos.d]# cat kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg

[root@node1 ~]# cd /etc/yum.repos.d/
[root@node1 yum.repos.d]# vim kubernetes.repo
[root@node1 yum.repos.d]# cat kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg

[root@node2 ~]# cd /etc/yum.repos.d/
[root@node2 yum.repos.d]# vim kubernetes.repo
[root@node2 yum.repos.d]# cat kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg

安装kubeadm kubelet kubectl工具

[root@master yum.repos.d]# dnf  -y  install kubeadm  kubelet  kubectl
[root@master yum.repos.d]# systemctl  restart  kubelet
[root@master yum.repos.d]# systemctl  enable  kubelet
Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /usr/lib/systemd/system/kubelet.service.

[root@node1 yum.repos.d]# dnf  -y  install kubeadm  kubelet  kubectl
[root@node1 yum.repos.d]# systemctl  restart  kubelet
[root@node1 yum.repos.d]# systemctl  enable  kubelet
Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /usr/lib/systemd/system/kubelet.service.

[root@node2 yum.repos.d]# dnf  -y  install kubeadm  kubelet  kubectl
[root@node2 yum.repos.d]# systemctl  restart  kubelet
[root@node2 yum.repos.d]# systemctl  enable  kubelet
Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /usr/lib/systemd/system/kubelet.service.


配置containerd(三个节点都需要执行,此处以master为例)

[root@master yum.repos.d]# containerd config default > /etc/containerd/config.toml
[root@master yum.repos.d]# vim /etc/containerd/config.toml
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6"
[root@master yum.repos.d]# systemctl   restart  containerd
[root@master yum.repos.d]# systemctl   enable  containerd
Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /usr/lib/systemd/system/containerd.service.

部署k8s的master节点

[root@master ~]# kubeadm init \
--apiserver-advertise-address=192.168.17.128 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.25.4 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16
说明:--apiserver-advertise-address 填master主机IP;续行符 \ 之后不能有空格或注释;
--pod-network-cidr 必须与后面flannel网络插件的默认网段 10.244.0.0/16 保持一致。

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.17.128:6443 --token 5ki9jv.a0a8wuuq1yejhndx \
	--discovery-token-ca-cert-hash sha256:87a3a1d46a3813c79b29e61ae3a0bc9836a971136731974822246738aad34460 

配置KUBECONFIG环境变量,使root用户每次登录后都能直接使用kubectl管理集群

[root@master ~]# vim /etc/profile.d/k8s.sh
[root@master ~]# cat /etc/profile.d/k8s.sh
export KUBECONFIG=/etc/kubernetes/admin.conf
[root@master ~]# source /etc/profile.d/k8s.sh

安装pod网络插件

[root@master ~]# wget https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
[root@master ~]# kubectl apply  -f  kube-flannel.yml
namespace/kube-flannel created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
[root@master ~]# kubectl get nodes
NAME     STATUS     ROLES           AGE   VERSION
master   NotReady   control-plane   24m   v1.25.4

将node节点加入到k8s集群中

node1
[root@node1 ~]# kubeadm join 192.168.17.128:6443 --token 5ki9jv.a0a8wuuq1yejhndx \
> --discovery-token-ca-cert-hash sha256:87a3a1d46a3813c79b29e61ae3a0bc9836a971136731974822246738aad34460 
[preflight] Running pre-flight checks
	[WARNING FileExisting-tc]: tc not found in system path
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.


node2
[root@node2 ~]# kubeadm join 192.168.17.128:6443 --token  5ki9jv.a0a8wuuq1yejhndx --discovery-token-ca-cert-hash sha256:87a3a1d46a3813c79b29e61ae3a0bc9836a971136731974822246738aad34460 

kubectl get nodes 查看node状态

[root@master ~]# kubectl get nodes
NAME     STATUS     ROLES           AGE     VERSION
master   NotReady   control-plane   47m     v1.25.4
node1    NotReady   <none>          2m34s   v1.25.4
node2    NotReady   <none>          39s     v1.25.4

使用k8s集群创建一个pod,运行nginx容器

[root@master ~]# kubectl create  deployment  nginx  --image nginx
deployment.apps/nginx created
[root@master ~]# kubectl  expose  deployment  nginx  --port 80  --type NodePort
service/nginx exposed
[root@master ~]# kubectl  get  pods  -o  wide
NAME                    READY   STATUS    RESTARTS   AGE   IP           NODE    NOMINATED NODE   READINESS GATES
nginx-76d6c9b8c-mjvkt   1/1     Running   0          12m   10.244.2.2   node2   <none>           <none>
[root@master ~]# kubectl  get  services
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP        19m
nginx        NodePort    10.98.195.122   <none>        80:30022/TCP   16s


在这里插入图片描述
修改默认网页

[root@master ~]# kubectl exec -it pod/nginx-76d6c9b8c-mjvkt -- /bin/bash
root@nginx-76d6c9b8c-mjvkt:/# cd /usr/share/nginx/html/
root@nginx-76d6c9b8c-mjvkt:/usr/share/nginx/html# echo "guanbingjie" > index.html

在这里插入图片描述

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值