Kubernetes Quick Deployment

kubeadm is a tool provided by the Kubernetes community for quickly deploying a Kubernetes cluster.

With this tool, a complete Kubernetes cluster can be deployed with just two commands:

# Create a Master (control-plane) node
$ kubeadm init

# Join a Node to the current cluster
$ kubeadm join <Master node IP and port>

2.1 Installation requirements

Before starting, the machines used to deploy the Kubernetes cluster must meet the following requirements (a quick pre-flight check sketch follows the list):

  • At least 3 machines, running CentOS 7 or later
  • Hardware: 2 GB of RAM or more, 2 or more CPUs, 20 GB of disk or more
  • Full network connectivity between all machines in the cluster
  • Internet access (needed to pull images)
  • Swap disabled
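A minimal sketch for checking these prerequisites on each machine; the thresholds simply mirror the list above and this is not an official kubeadm check:

[root@localhost ~]# nproc                          # expect 2 or more CPUs
[root@localhost ~]# free -h | grep Mem             # expect 2 GB of RAM or more
[root@localhost ~]# swapon --show                  # expect no output once swap is disabled
[root@localhost ~]# ping -c1 mirrors.aliyun.com    # confirms outbound network access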

2.2 Learning objectives

  1. Install Docker and kubeadm on all nodes
  2. Deploy the Kubernetes Master
  3. Deploy a container network plugin
  4. Deploy the Kubernetes Nodes and join them to the cluster
  5. Deploy the Dashboard web UI to view Kubernetes resources visually

2.3 Preparing the environment and installing

OS        IP               Role
CentOS 8  192.168.136.140  master
CentOS 8  192.168.136.142  node1
CentOS 8  192.168.136.155  node2

Set the hostname on every virtual machine

[root@localhost ~]# hostnamectl set-hostname master
[root@localhost ~]# bash
[root@master ~]# 

[root@localhost ~]#  hostnamectl set-hostname node1
[root@localhost ~]# bash

[root@localhost ~]#  hostnamectl set-hostname node2
[root@localhost ~]# bash
[root@node2 ~]# 

Disable the firewall and SELinux on every virtual machine

# On the master host
[root@master ~]# systemctl stop firewalld
[root@master ~]# systemctl disable firewalld
Removed /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@master ~]# vim /etc/selinux/config 
[root@master ~]# cat /etc/selinux/config 

# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
#     enforcing - SELinux security policy is enforced.
#     permissive - SELinux prints warnings instead of enforcing.
#     disabled - No SELinux policy is loaded.
SELINUX=disabled
# SELINUXTYPE= can take one of these three values:
#     targeted - Targeted processes are protected,
#     minimum - Modification of targeted policy. Only selected processes are protected. 
#     mls - Multi Level Security protection.
SELINUXTYPE=targeted

[root@master ~]# reboot


# On the node1 host
[root@node1 ~]# systemctl stop firewalld
[root@node1 ~]# systemctl disable firewalld
Removed /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@node1 ~]#  sed -i 's/enforcing/disabled/' /etc/selinux/config 
[root@node1 ~]# reboot

# On the node2 host
[root@node2 ~]# systemctl stop firewalld
[root@node2 ~]# systemctl disable firewalld
Removed /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@node2 ~]#  sed -i 's/enforcing/disabled/' /etc/selinux/config 
[root@node2 ~]# reboot
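After the reboot, the result can be verified on each host; a quick sketch, with the output expected given the changes above:

[root@master ~]# getenforce
Disabled
[root@master ~]# systemctl is-active firewalld
inactive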

Disable the swap partition on every virtual machine

# On the master host
[root@master ~]# swapoff -a
[root@master ~]# cat /etc/fstab 
........
UUID=8ed9e77d-0bb9-4723-a705-21f3d6c10813 /boot                   xfs     defaults        0 0
#/dev/mapper/cs-swap     none                    swap    defaults        0 0  // comment out or delete this last line

# On the node1 host
[root@node1 ~]# swapoff -a
[root@node1 ~]# cat /etc/fstab 
#/dev/mapper/cs-swap     none                    swap    defaults        0 0 // commented out

# On the node2 host
[root@node2 ~]# swapoff -a
[root@node2 ~]# vim /etc/fstab 
[root@node2 ~]# cat /etc/fstab 
UUID=1afbaf1a-9625-415a-a9a0-438ee0488240 /boot                   xfs     defaults        0 0
#/dev/mapper/cs-swap     none                    swap    defaults        0 0  // commented out
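The fstab edit can also be done non-interactively; a sketch that comments out any line containing a swap entry and then confirms swap is off:

[root@master ~]# sed -i '/\sswap\s/s/^/#/' /etc/fstab
[root@master ~]# free -m | grep Swap     # should show 0 total after swapoff -a has run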

Add host-name mappings on every virtual machine

# On the master host
[root@master ~]# cat /etc/hosts 
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.136.140 master master.example.com
192.168.136.142 node1  node1.example.com
192.168.136.155 node2  node2.example.com

# On the node1 host
[root@node1 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.136.140 master master.example.com
192.168.136.142 node1  node1.example.com
192.168.136.155 node2  node2.example.com
[root@node1 ~]# 

# On the node2 host
[root@node2 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.136.140 master master.example.com
192.168.136.142 node1  node1.example.com
192.168.136.155 node2  node2.example.com
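The same three entries can also be appended in one step instead of editing the file by hand; a sketch using the addresses from the table above:

[root@master ~]# cat >> /etc/hosts << EOF
192.168.136.140 master master.example.com
192.168.136.142 node1  node1.example.com
192.168.136.155 node2  node2.example.com
EOF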

Test that the hosts can ping each other

# On the master host, check that node1 and node2 can be pinged
[root@master ~]# ping node1
PING node1 (192.168.136.142) 56(84) bytes of data.
64 bytes from node1 (192.168.136.142): icmp_seq=1 ttl=64 time=0.432 ms
^Z
[1]+  Stopped                 ping node1

[root@master ~]# ping node2
PING node2 (192.168.136.155) 56(84) bytes of data.
64 bytes from node2 (192.168.136.155): icmp_seq=1 ttl=64 time=0.733 ms
^Z
[2]+  Stopped                 ping node2
[root@master ~]# 

# On the node1 and node2 hosts, check that master can be pinged
[root@node1 ~]# ping master
PING master (192.168.136.140) 56(84) bytes of data.
64 bytes from master (192.168.136.140): icmp_seq=1 ttl=64 time=0.587 
^Z
[1]+  Stopped                 ping master
[root@node1 ~]# 

[root@node2 ~]# ping master
PING master (192.168.136.140) 56(84) bytes of data.
64 bytes from master (192.168.136.140): icmp_seq=1 ttl=64 time=1.14 ms
^Z
[1]+  Stopped                 ping master
[root@node2 ~]# 
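Note that ^Z only suspends ping and leaves a stopped background job behind; a cleaner one-shot check is a count-limited ping, for example:

[root@master ~]# for i in node1 node2; do ping -c1 $i > /dev/null && echo "$i reachable"; done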

Pass bridged IPv4 traffic to the iptables chains (all hosts)

# On the master host
[root@master ~]# cat > /etc/sysctl.d/k8s.conf << EOF
> net.bridge.bridge-nf-call-ip6tables = 1
> net.bridge.bridge-nf-call-iptables = 1
> net.ipv4.ip_forward = 1
> EOF
[root@master ~]# 


# Apply the settings
[root@master ~]# sysctl --system
* Applying /etc/sysctl.d/k8s.conf ... // seeing this line means the file has been picked up
net.ipv4.ip_forward = 1
* Applying /etc/sysctl.conf ...
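The net.bridge.* keys only exist once the br_netfilter kernel module is loaded; if sysctl reports them as missing, load the module first. A sketch (this step is not in the original transcript):

[root@master ~]# modprobe br_netfilter
[root@master ~]# echo br_netfilter > /etc/modules-load.d/k8s.conf   # load the module on boot
[root@master ~]# sysctl --system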

Install time synchronization on all hosts

# On the master host
[root@master ~]# yum -y install chrony
[root@master ~]# vim /etc/chrony.conf 
#pool 2.centos.pool.ntp.org iburst // delete or comment out this line
pool time1.aliyun.com iburst // add this Aliyun server line
[root@master ~]# systemctl restart chronyd  (restart the service)

# On the node1 host
[root@node1 ~]# yum -y install chrony
[root@node1 ~]# vim /etc/chrony.conf 
pool master.example.com iburst // change the pool to the master's domain name
[root@node1 ~]# systemctl restart chronyd


# On the node2 host
[root@node2 ~]# yum -y install chrony

[root@node2 ~]# vim /etc/chrony.conf 
pool master.example.com iburst // add this line
[root@node2 ~]# systemctl restart chronyd  (restart the service)


# On the master host
[root@master ~]# chronyc sources
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^* 203.107.6.88  (synchronizing with the Aliyun server)                 2   6   117     4   +183us[ +300us] +/-   28ms

# On the node1 and node2 hosts
[root@node1 ~]# chronyc sources
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^? master  (using the master as the source)                       0   8     0     -     +0ns[   +0ns] +/-    0ns
[root@node1 ~]# 


[root@node2 ~]# chronyc sources
MS Name/IP address         Stratum Poll Reach LastRx Last sample               
===============================================================================
^? master                        0   8     0     -     +0ns[   +0ns] +/-    0ns
[root@node2 ~]# 
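The "^?" state on the nodes means chrony has not yet been able to use the master as a source. By default chronyd does not serve time to NTP clients, so if the nodes never reach "^*", the master likely needs an allow directive; a sketch using this lab's subnet (this step is an assumption and not part of the original transcript):

# On master: permit NTP client access from the node subnet
[root@master ~]# echo 'allow 192.168.136.0/24' >> /etc/chrony.conf
[root@master ~]# systemctl restart chronyd
# Re-check on the nodes
[root@node1 ~]# chronyc sources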

Set up passwordless SSH login

# On the master host
[root@master ~]#  ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): 
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:IuO8zGlMlyeeVnqzBw6sgNltYJucF39y23MxORFSbsw root@master
The key's randomart image is:
+---[RSA 3072]----+
|           ..    |
|          .+.    |
|           .E.   |
|  o .      ..    |
| * *o+..S    o   |
|o Bo=oO.*   =    |
|   *o+ % +   +   |
|   o+o= = + .    |
|   .=. ..+ o     |
+----[SHA256]-----+

[root@master ~]# ssh-copy-id master // the master itself also needs the key
[root@master ~]# ssh-copy-id node1
[root@master ~]# ssh-copy-id node2

Check that the time is consistent

# Set the time zone
[root@master ~]# timedatectl set-timezone Asia/Shanghai
[root@node1 ~]# timedatectl set-timezone Asia/Shanghai
[root@node2 ~]# timedatectl set-timezone Asia/Shanghai

# Check from the master host
[root@master ~]# for i in master node1 node2;do ssh $i 'date';done 
Mon Nov 13 15:26:01 CST 2023
Mon Nov 13 15:26:01 CST 2023
Mon Nov 13 15:26:01 CST 2023
[root@master ~]# 

Install Docker/kubeadm/kubelet on all nodes

Docker is installed on every node first. Note that since Kubernetes 1.24 the kubelet no longer talks to Docker directly as its container runtime; in this deployment the kubelet uses containerd, which is installed together with docker-ce and configured in a later step.

Install Docker

# On the master host
[root@master ~]# yum install -y yum-utils device-mapper-persistent-data lvm2 
[root@master ~]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
[root@master ~]# yum -y install docker-ce
[root@master ~]#  systemctl enable --now docker
Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /usr/lib/systemd/system/docker.service.
[root@master ~]# vim /etc/docker/daemon.json
[root@master ~]# cat /etc/docker/daemon.json
{
       	"registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"]
}
[root@master ~]# systemctl restart docker
[root@master ~]# docker info
  https://b9pmyelo.mirror.aliyuncs.com/ // check that this mirror line is present


# On the node1 host
[root@node1 ~]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
[root@node1 ~]# yum -y install docker-ce
[root@node1 ~]# systemctl enable --now docker
Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /usr/lib/systemd/system/docker.service.
[root@node1 ~]# vim /etc/docker/daemon.json
[root@node1 ~]# cat /etc/docker/daemon.json
{
       "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"]
}

[root@node1 ~]# systemctl restart docker
[root@node1 ~]# docker info
 Registry Mirrors:
  https://b9pmyelo.mirror.aliyuncs.com/
 Live Restore Enabled: false


# On the node2 host
[root@node2 ~]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
[root@node2 ~]# yum -y install docker-ce
[root@node2 ~]# systemctl enable --now docker
Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /usr/lib/systemd/system/docker.service.
[root@node2 ~]# vim /etc/docker/daemon.json
[root@node2 ~]# cat /etc/docker/daemon.json
{
       "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"]
}
[root@node2 ~]# systemctl restart docker 

Add the Aliyun Kubernetes YUM repository on all hosts

# On the master host
[root@master ~]# cat > /etc/yum.repos.d/kubernetes.repo << EOF
> [kubernetes]
> name=Kubernetes
> baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
> enabled=1
> gpgcheck=0
> repo_gpgcheck=0
> gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
> EOF
[root@master ~]# 

# On the node1 host
[root@node1 ~]# cat > /etc/yum.repos.d/kubernetes.repo << EOF
> [kubernetes]
> name=Kubernetes
> baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
> enabled=1
> gpgcheck=0
> repo_gpgcheck=0
> gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
> EOF
[root@node1 ~]# 

# On the node2 host
[root@node2 ~]# cat > /etc/yum.repos.d/kubernetes.repo << EOF
> [kubernetes]
> name=Kubernetes
> baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
> enabled=1
> gpgcheck=0
> repo_gpgcheck=0
> gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
> EOF
[root@node2 ~]# 

Install kubeadm, kubelet and kubectl on all hosts

# On the master host
[root@master ~]# yum list all |grep kubelet
[root@master ~]# yum -y install kubelet kubectl kubeadm
[root@master ~]# systemctl stop kubelet
[root@master ~]# systemctl enable kubelet
Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /usr/lib/systemd/system/kubelet.service.
[root@master ~]# 

# On the node1 host
[root@node1 ~]# yum -y install kubelet kubectl kubeadm
[root@node1 ~]# systemctl enable kubelet

# On the node2 host
[root@node2 ~]# yum -y install kubelet kubectl kubeadm
[root@node2 ~]# systemctl enable kubelet
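If the cluster should be initialized with a specific release (v1.28.2 is used later in this guide), the packages can be pinned explicitly, assuming that version is available in the repository; a sketch:

[root@master ~]# yum -y install kubelet-1.28.2 kubeadm-1.28.2 kubectl-1.28.2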

Configure containerd

To make sure that cluster initialization and joining succeed later, containerd's configuration file /etc/containerd/config.toml needs to be adjusted. This must be done on all nodes.

# On the master host
[root@master ~]# cd /etc/containerd/
[root@master containerd]# ls
config.toml
1. First, you can look at the file's current contents
[root@master containerd]# cat config.toml 

2. Generate a full default configuration; viewing the file again shows the newly added content
[root@master containerd]# containerd config default > /etc/containerd/config.toml
[root@master containerd]# vim config.toml 
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6" // change this line
# Restart the service and enable it at boot
[root@master ~]# systemctl restart containerd
[root@master ~]# systemctl enable containerd
Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /usr/lib/systemd/system/containerd.service.
[root@master ~]# 

# On the node1 host
[root@node1 ~]# containerd config default > /etc/containerd/config.toml
[root@node1 ~]# vim /etc/containerd/config.toml 
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6" // change this line
[root@node1 ~]# systemctl restart containerd
[root@node1 ~]# systemctl enable containerd

# On the node2 host
[root@node2 ~]# containerd config default > /etc/containerd/config.toml
[root@node2 ~]# vim /etc/containerd/config.toml 
sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6" // change this line
[root@node2 ~]# systemctl restart containerd
[root@node2 ~]# systemctl enable containerd
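The interactive vim edit of sandbox_image can also be scripted; a sketch using sed that makes the same change non-interactively and then verifies it:

[root@master ~]# sed -i 's#sandbox_image = ".*"#sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6"#' /etc/containerd/config.toml
[root@master ~]# grep sandbox_image /etc/containerd/config.toml
    sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6"
[root@master ~]# systemctl restart containerd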

Deploy the Kubernetes Master

Run the following on the master host:

[root@master containerd]# kubeadm init \
  --apiserver-advertise-address=192.168.136.140 \
  --image-repository registry.aliyuncs.com/google_containers \
  --kubernetes-version v1.28.2 \
  --service-cidr=10.96.0.0/12 \
  --pod-network-cidr=10.244.0.0/16
[init] Using Kubernetes version: v1.28.2
[preflight] Running pre-flight checks
	[WARNING FileExisting-tc]: tc not found in system path
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
..........

To start using your cluster, you need to run the following as a regular user: (from this line on, copy everything below into a file)

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf 

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.136.140:6443 --token pfshvh.jmujin5uxg2c5qaw \
	--discovery-token-ca-cert-hash sha256:d0f9f31fe8398ed869c21e870231da7004eb899b00af7627f72c9f368a4b220d 

Create a new file named file to save this output

[root@master containerd]# cat file
To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf  // run this line

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.136.140:6443 --token pfshvh.jmujin5uxg2c5qaw \
	--discovery-token-ca-cert-hash sha256:d0f9f31fe8398ed869c21e870231da7004eb899b00af7627f72c9f368a4b220d 

[root@master containerd]# export KUBECONFIG=/etc/kubernetes/admin.conf
# Load the variable automatically at login
[root@master containerd]# echo "export KUBECONFIG=/etc/kubernetes/admin.conf" > /etc/profile.d/k8s.sh
[root@master containerd]# echo $KUBECONFIG 
/etc/kubernetes/admin.conf
[root@master containerd]# 

Deploy the pod network

Flannel is used as the network plugin here.

# Download the flannel manifest
[root@master ~]# wget https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml


# Deploy the flannel components from the manifest
[root@master ~]# kubectl apply -f kube-flannel.yml 
namespace/kube-flannel created
serviceaccount/flannel created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
[root@master ~]# 

# Check which resources the manifest created
[root@master ~]# kubectl get -f kube-flannel.yml 
NAME                     STATUS   AGE
namespace/kube-flannel   Active   2m22s

NAME                     SECRETS   AGE
serviceaccount/flannel   0         2m22s

NAME                                            CREATED AT
clusterrole.rbac.authorization.k8s.io/flannel   2023-11-13T14:57:58Z

NAME                                                   ROLE                  AGE
clusterrolebinding.rbac.authorization.k8s.io/flannel   ClusterRole/flannel   2m22s

NAME                         DATA   AGE
configmap/kube-flannel-cfg   2      2m22s

NAME                             DESIRED   CURRENT   READY   UP-TO-DATE   AVAILABLE   NODE SELECTOR   AGE
daemonset.apps/kube-flannel-ds   1         1         0       1            0           <none>          2m22s
[root@master ~]# 

Check the kube-system components

[root@master ~]# kubectl get pods -n kube-system 
NAME                             READY   STATUS    RESTARTS   AGE
coredns-66f779496c-949cm         1/1     Running   0          5h18m
coredns-66f779496c-mj9x4         1/1     Running   0          5h18m
etcd-master                      1/1     Running   1          5h18m
kube-apiserver-master            1/1     Running   1          5h18m
kube-controller-manager-master   1/1     Running   1          5h18m // check that everything is in the Running state
kube-proxy-h2cq8                 1/1     Running   0          5h18m
kube-scheduler-master            1/1     Running   1          5h18m
[root@master ~]# 
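Instead of polling manually, kubectl can block until the pods are Ready; an optional sketch:

[root@master ~]# kubectl -n kube-flannel wait --for=condition=Ready pod --all --timeout=120s
[root@master ~]# kubectl -n kube-system wait --for=condition=Ready pod --all --timeout=120s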

Join node1 and node2 to the cluster

# Look at the file created earlier on the master
..................
kubeadm join 192.168.136.140:6443 --token pfshvh.jmujin5uxg2c5qaw \   # locate these lines
	--discovery-token-ca-cert-hash sha256:d0f9f31fe8398ed869c21e870231da7004eb899b00af7627f72c9f368a4b220d  

# Then run the command on node1 and on node2
# On the node1 host
[root@node1 ~]# kubeadm join 192.168.136.140:6443 --token pfshvh.jmujin5uxg2c5qaw \
> --discovery-token-ca-cert-hash sha256:d0f9f31fe8398ed869c21e870231da7004eb899b00af7627f72c9f368a4b220d
[preflight] Running pre-flight checks
	[WARNING FileExisting-tc]: tc not found in system path
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

[root@node1 ~]# 


# On the master host, check that the node was added
[root@master ~]# kubectl get nodes
NAME     STATUS     ROLES           AGE     VERSION
master   Ready      control-plane   5h26m   v1.28.2
node1    NotReady   <none>          3m50s   v1.28.2
[root@master ~]# 
[root@master ~]# kubectl get pods -n kube-flannel
NAME                    READY   STATUS    RESTARTS   AGE
kube-flannel-ds-9xfkd   1/1     Running   0          6m1s
kube-flannel-ds-x4hlh   1/1     Running   0          17m
[root@master ~]# 

On the node2 host

[root@node2 ~]# kubeadm join 192.168.136.140:6443 --token pfshvh.jmujin5uxg2c5qaw \
> --discovery-token-ca-cert-hash sha256:d0f9f31fe8398ed869c21e870231da7004eb899b00af7627f72c9f368a4b220d
[preflight] Running pre-flight checks
	[WARNING FileExisting-tc]: tc not found in system path
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.

[root@master ~]# kubectl get pods -n kube-flannel
NAME                    READY   STATUS    RESTARTS   AGE
kube-flannel-ds-bbrlj   1/1     Running   0          11m
kube-flannel-ds-l498s   1/1     Running   0          13m
kube-flannel-ds-zlmzd   1/1     Running   0          11m
[root@master ~]# 

[root@master ~]# kubectl get nodes
NAME     STATUS   ROLES           AGE   VERSION
master   Ready    control-plane   14m   v1.28.2
node1    Ready    <none>          10m   v1.28.2
node2    Ready    <none>          10m   v1.28.2
[root@master ~]# 
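The join token printed by kubeadm init expires after 24 hours by default; if it has expired by the time a node needs to join, a fresh join command can be generated on the master. A sketch:

[root@master ~]# kubeadm token create --print-join-command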

Test the Kubernetes cluster

# Create a pod named nginx, using the nginx image, in the default namespace
[root@master ~]# kubectl create deployment nginx --image=nginx
deployment.apps/nginx created

# Check the pod status; it is running and can be reached inside the cluster via the container IP
[root@master ~]#  kubectl get pods -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP           NODE    NOMINATED NODE   READINESS GATES
nginx-7854ff8877-59l4j   1/1     Running   0          10m   10.244.2.2   node2   <none>           <none>
[root@master ~]# 


// Access from inside the cluster
[root@master ~]# curl 10.244.2.2
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>
[root@master ~]# 

// Expose the service on a NodePort
[root@master ~]# kubectl expose deployment nginx --port=80 --type=NodePort
service/nginx exposed
[root@master ~]# kubectl get svc
NAME         TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP        23m
nginx        NodePort    10.102.27.148   <none>        80:32167/TCP   4m25s
[root@master ~]# 

// Check the pod and the service
[root@master ~]# kubectl get pod,svc
NAME                         READY   STATUS    RESTARTS   AGE
pod/nginx-7854ff8877-59l4j   1/1     Running   0          17m

NAME                 TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
service/kubernetes   ClusterIP   10.96.0.1       <none>        443/TCP        24m
service/nginx        NodePort    10.102.27.148   <none>        80:32167/TCP   5m23s
[root@master ~]# 

Then access the service from a browser, using a node's IP plus the mapped port.

In this example that is the node IP plus NodePort 32167 (shown by kubectl get svc above), for example http://192.168.136.140:32167, which returns the nginx welcome page.
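A quick command-line check of the NodePort, assuming the port shown above (32167); it should return the same nginx welcome page:

[root@master ~]# curl -s http://192.168.136.140:32167 | grep '<title>'
<title>Welcome to nginx!</title>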

Additional commands

# List the namespaces
[root@master ~]# kubectl get namespace
NAME              STATUS   AGE
default           Active   5h14m
kube-flannel      Active   4m13s
kube-node-lease   Active   5h14m
kube-public       Active   5h14m
kube-system       Active   5h14m
[root@master ~]# 

# Look at the kube-flannel namespace
[root@master ~]# kubectl get pods -n kube-flannel
NAME                    READY   STATUS    RESTARTS   AGE
kube-flannel-ds-x4hlh   1/1     Running   0          5m51s
[root@master ~]# 

# Show the pods together with their IPs and nodes
[root@master ~]# kubectl get pods -n kube-flannel -o wide
NAME                    READY   STATUS    RESTARTS   AGE    IP                NODE     NOMINATED NODE   READINESS GATES
kube-flannel-ds-9xfkd   1/1     Running   0          5m3s   192.168.136.142   node1    <none>           <none>
kube-flannel-ds-x4hlh   1/1     Running   0          16m    192.168.136.140   master   <none>           <none>
kube-flannel-ds-mhn7s   1/1     Running   0          2m41s   192.168.136.155   node2    <none>           <none>
[root@master ~]# 
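When the test is finished, the demo resources can be removed so the cluster stays clean; a sketch:

[root@master ~]# kubectl delete service nginx
[root@master ~]# kubectl delete deployment nginx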