Initializing a Kubernetes cluster with kubeadm

Check the operating system version

[root@master ~]# cat /etc/redhat-release 
CentOS Stream release 8
[root@node1 ~]# cat /etc/redhat-release 
CentOS Stream release 8
[root@node2 ~]# cat /etc/redhat-release 
CentOS Stream release 8

Hostname resolution (on all hosts)

[root@master ~]# cat /etc/hosts 
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.100.100  master.example.com  master
192.168.100.10  node1.example.com   node1
192.168.100.20  node2.example.com   node2
[root@master ~]# scp /etc/hosts root@192.168.100.10:/etc/hosts
The authenticity of host '192.168.100.10 (192.168.100.10)' can't be established.
ECDSA key fingerprint is SHA256:5gd7HmPvSJxbclz5gFTHK1WDjLN6Qwd8HpqA/enrYlI.
Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
Warning: Permanently added '192.168.100.10' (ECDSA) to the list of known hosts.
root@192.168.100.10's password: 
hosts                                                                                    100%  225   183.3KB/s   00:00    
[root@master ~]# scp /etc/hosts root@192.168.100.20:/etc/hosts
The authenticity of host '192.168.100.20 (192.168.100.20)' can't be established.
ECDSA key fingerprint is SHA256:r4SZXJQDBt9bqNG2NHuCdmfeRGLsX+INUbL3qWQ9NVI.
Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
Warning: Permanently added '192.168.100.20' (ECDSA) to the list of known hosts.
root@192.168.100.20's password: 
hosts        
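
A quick check confirms that each name resolves correctly before moving on (a sketch, using the addresses configured above; output will vary):

[root@master ~]# for h in master node1 node2; do getent hosts $h; done
192.168.100.100 master.example.com master
192.168.100.10  node1.example.com  node1
192.168.100.20  node2.example.com  node2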

Distribute SSH keys

[root@master ~]# ssh-keygen 
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): 
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:vUOq8potkuN7ibCgonxeX2BPaqUR/L70IDqiMnm5N4M root@master.example.com
The key's randomart image is:
+---[RSA 3072]----+
|                 |
|      .          |
|       o         |
|        o.       |
|       +S+o      |
|o     . Xo .     |
|o+ = o =.*o      |
|O EoX++.+ +.     |
|=*BO+O=. . .     |
+----[SHA256]-----+
[root@master ~]# ssh-copy-id -i ~/.ssh/id_rsa.pub root@node1
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'node1 (192.168.100.10)' can't be established.
ECDSA key fingerprint is SHA256:5gd7HmPvSJxbclz5gFTHK1WDjLN6Qwd8HpqA/enrYlI.
Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@node1's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'root@node1'"
and check to make sure that only the key(s) you wanted were added.

[root@master ~]# ssh-copy-id -i ~/.ssh/id_rsa.pub root@node2
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'node2 (192.168.100.20)' can't be established.
ECDSA key fingerprint is SHA256:r4SZXJQDBt9bqNG2NHuCdmfeRGLsX+INUbL3qWQ9NVI.
Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@node2's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'root@node2'"
and check to make sure that only the key(s) you wanted were added.
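
Passwordless login can now be verified from the master with a short loop (assuming the hostnames configured above):

[root@master ~]# for h in node1 node2; do ssh root@$h hostname; done
node1.example.com
node2.example.com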

Time synchronization

[root@master ~]# yum install -y chrony
[root@master ~]# vim /etc/chrony.conf 
···
local stratum 10  
···
[root@master ~]# systemctl restart chronyd
[root@master ~]# systemctl enable chronyd
[root@master ~]# hwclock -w
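
Note: for the nodes to be allowed to synchronize against the master, chrony on the master must also accept client requests; an allow directive for the cluster subnet (an assumption based on the addressing above) does this:

[root@master ~]# vim /etc/chrony.conf
allow 192.168.100.0/24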


[root@node1 ~]# yum install -y chrony
[root@node1 ~]# vim /etc/chrony.conf 
server master.example.com  iburst
[root@node1 ~]# systemctl restart chronyd.service 
[root@node1 ~]# systemctl enable chronyd

[root@node2 ~]# yum install -y chrony
[root@node2 ~]# vim /etc/chrony.conf 
server master.example.com  iburst
[root@node2 ~]# systemctl restart chronyd
[root@node2 ~]# systemctl enable chronyd
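
Whether the nodes actually sync from the master can be checked with chronyc (sample output; the values will differ):

[root@node1 ~]# chronyc sources
MS Name/IP address         Stratum Poll Reach LastRx Last sample
===============================================================================
^* master.example.com           10   6    17    23   +15us[  +33us] +/-  180us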

Disable firewalld, SELinux, and postfix (on all nodes)

[root@master ~]# systemctl stop firewalld
[root@master ~]# systemctl disable firewalld
Removed /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@master ~]# vim /etc/selinux/config 
SELINUX=disabled
[root@master ~]# setenforce 0
[root@master ~]# systemctl stop postfix
Failed to stop postfix.service: Unit postfix.service not loaded.

[root@node1 ~]# systemctl stop firewalld
[root@node1 ~]# systemctl disable firewalld
Removed /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@node1 ~]# vim /etc/selinux/config 
SELINUX=disabled
[root@node1 ~]# setenforce 0
[root@node1 ~]# systemctl stop postfix
Failed to stop postfix.service: Unit postfix.service not loaded.

[root@node2 ~]# systemctl stop firewalld
[root@node2 ~]# systemctl disable firewalld
Removed /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@node2 ~]# vim /etc/selinux/config 
SELINUX=disabled
[root@node2 ~]# systemctl stop postfix
Failed to stop postfix.service: Unit postfix.service not loaded.
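
(Note that the node2 transcript omits setenforce 0; run it there as well, or reboot, so that SELinux is permissive immediately.) The resulting state can be verified on each node:

[root@master ~]# getenforce
Permissive
[root@master ~]# firewall-cmd --state
not running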

Disable the swap partition (on all nodes)

[root@master ~]# vim /etc/fstab 
(Comment out the swap line; the nodes below get the same edit.)
[root@master ~]# swapoff  -a

[root@node1 ~]# vim /etc/fstab 
[root@node1 ~]# swapoff  -a

[root@node2 ~]# vim /etc/fstab 
[root@node2 ~]# swapoff  -a
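
The fstab edit can also be scripted; the sed one-liner below (a sketch) comments out any active swap entry, and swapon/free confirm nothing is left:

[root@master ~]# sed -ri 's|^([^#].*\sswap\s.*)|#\1|' /etc/fstab
[root@master ~]# swapon --show
[root@master ~]# free -m | grep -i swap
Swap:             0           0           0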

Enable IP forwarding and modify kernel parameters (required on all nodes)

[root@master ~]# vim /etc/sysctl.d/k8s.conf
[root@master ~]# cat /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
[root@master ~]# modprobe br_netfilter
[root@master ~]# sysctl -p /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
[root@node1 ~]# vim /etc/sysctl.d/k8s.conf
[root@node1 ~]# cat /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
[root@node1 ~]# modprobe br_netfilter
[root@node1 ~]# sysctl -p /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
[root@node2 ~]# vim /etc/sysctl.d/k8s.conf
[root@node2 ~]# cat /etc/sysctl.d/k8s.conf 
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
[root@node2 ~]# modprobe br_netfilter
[root@node2 ~]# sysctl -p /etc/sysctl.d/k8s.conf 
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
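
modprobe only loads br_netfilter for the current boot; to keep the bridge sysctls working after a reboot, have systemd load the module automatically on every node:

[root@master ~]# echo br_netfilter > /etc/modules-load.d/k8s.conf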

Configure IPVS

[root@master ~]# vim /etc/sysconfig/modules/ipvs.modules
[root@master ~]# cat  /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
[root@master ~]# chmod +x /etc/sysconfig/modules/ipvs.modules
[root@master ~]# bash /etc/sysconfig/modules/ipvs.modules
[root@master ~]# lsmod | grep -e ip_vs
ip_vs_sh               16384  0
ip_vs_wrr              16384  0
ip_vs_rr               16384  0
ip_vs                 172032  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_defrag_ipv6         20480  1 ip_vs
nf_conntrack          155648  1 ip_vs
libcrc32c              16384  3 nf_conntrack,xfs,ip_vs
[root@master ~]# reboot
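
On CentOS Stream 8 the /etc/sysconfig/modules scripts are not run automatically at boot, so the IPVS modules can instead be made persistent through systemd (same mechanism as for br_netfilter above):

[root@master ~]# printf '%s\n' ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh > /etc/modules-load.d/ipvs.conf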

Install docker-ce

[root@master ~]#  dnf -y install docker-ce --allowerasing
[root@master ~]# systemctl restart docker
[root@master ~]# systemctl enable docker
Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /usr/lib/systemd/system/docker.service.

[root@node1 ~]#  dnf -y install docker-ce --allowerasing
[root@node1 yum.repos.d]# systemctl restart docker
[root@node1 yum.repos.d]# systemctl enable docker
Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /usr/lib/systemd/system/docker.service.

[root@node2 ~]#  dnf -y install docker-ce --allowerasing
[root@node2 ~]# systemctl restart docker
[root@node2 ~]# systemctl enable docker
Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /usr/lib/systemd/system/docker.service.
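
Installing docker-ce also pulls in containerd.io, and it is containerd (configured below) that kubeadm will use as the container runtime, since Kubernetes 1.25 no longer supports the dockershim. The installed versions can be confirmed with:

[root@master ~]# docker --version
[root@master ~]# containerd --version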

Install the Kubernetes components

[root@master ~]# vim /etc/yum.repos.d/kubernetes.repo
[root@master ~]# cat /etc/yum.repos.d/kubernetes.repo 
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg

[root@node1 ~]# vim /etc/yum.repos.d/kubernetes.repo
[root@node1 ~]# cat /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg

[root@node2 ~]# vim /etc/yum.repos.d/kubernetes.repo
[root@node2 ~]# cat /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
[root@master ~]# dnf  -y  install kubeadm  kubelet  kubectl
[root@master ~]# systemctl restart kubelet
[root@master ~]# systemctl enable kubelet
Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /usr/lib/systemd/system/kubelet.service.

[root@node1 ~]# dnf  -y  install kubeadm  kubelet  kubectl
[root@node1 ~]# systemctl  restart  kubelet
[root@node1 ~]# systemctl  enable  kubelet
Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /usr/lib/systemd/system/kubelet.service.

[root@node2 ~]# dnf  -y  install kubeadm  kubelet  kubectl
[root@node2 ~]# systemctl  restart  kubelet
[root@node2 ~]# systemctl  enable  kubelet
Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /usr/lib/systemd/system/kubelet.service.
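
dnf installs the newest packages in the repository by default; to match the v1.25.4 control plane deployed below, the versions can be pinned explicitly (a sketch; adjust to what the mirror actually provides):

[root@master ~]# dnf -y install kubeadm-1.25.4 kubelet-1.25.4 kubectl-1.25.4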

Configure containerd

[root@master ~]# containerd config default > /etc/containerd/config.toml
[root@master ~]# vim /etc/containerd/config.toml 
···
    sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6"
···
[root@master ~]# systemctl restart containerd
[root@master ~]# systemctl enable containerd
Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /usr/lib/systemd/system/containerd.service.

[root@node1 ~]# containerd config default > /etc/containerd/config.toml
[root@node1 ~]# vim /etc/containerd/config.toml 
···
    sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6"
···
[root@node1 ~]# systemctl restart containerd
[root@node1 ~]# systemctl enable containerd
Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /usr/lib/systemd/system/containerd.service.

[root@node2 ~]# containerd config default > /etc/containerd/config.toml
[root@node2 ~]# vim /etc/containerd/config.toml 
···
    sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6"
···
[root@node2 ~]# systemctl restart containerd
[root@node2 ~]# systemctl enable containerd
Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /usr/lib/systemd/system/containerd.service.
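
Besides the sandbox_image, containerd's runc runtime usually needs to be switched to the systemd cgroup driver, because kubelet defaults to systemd cgroups on this platform; in the same config.toml this is the SystemdCgroup option, settable with a one-liner before restarting containerd:

[root@master ~]# sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
[root@master ~]# systemctl restart containerd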

Deploy the k8s master node (run on the master node)

[root@master ~]# kubeadm init \
> --apiserver-advertise-address=192.168.100.100 \
> --image-repository registry.aliyuncs.com/google_containers \
> --kubernetes-version v1.25.4 \
> --service-cidr=10.96.0.0/12 \
> --pod-network-cidr=10.244.0.0/16
···
Your Kubernetes control-plane has initialized successfully!
···
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.100.100:6443 --token hs3pb5.b3qf8dso2n31grrf \
	--discovery-token-ca-cert-hash sha256:5efc0e46a7fbb33dd1cb61e3887208d66329fdb229cf0b59b8b8f72900d9c54f 
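
The elided init output also tells you to set up the admin kubeconfig; without it the kubectl commands below will not work. As root this amounts to:

[root@master ~]# mkdir -p $HOME/.kube
[root@master ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# chown $(id -u):$(id -g) $HOME/.kube/config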

Install the pod network plugin (CNI/flannel)

[root@master ~]# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
--2022-11-17 22:48:06--  https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ...
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 4583 (4.5K) [text/plain]
Saving to: ‘kube-flannel.yml’

kube-flannel.yml                  100%[=============================================================>]   4.48K  5.66KB/s    in 0.8s    

2022-11-17 22:48:09 (5.66 KB/s) - ‘kube-flannel.yml’ saved [4583/4583]

[root@master ~]# kubectl apply  -f  kube-flannel.yml
namespace/kube-flannel created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
[root@master ~]# ls
anaconda-ks.cfg  kube-flannel.yml
[root@master ~]# kubectl get nodes
NAME                 STATUS   ROLES           AGE     VERSION
master.example.com   Ready    control-plane   9m24s   v1.25.4
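
The flannel DaemonSet runs in its own kube-flannel namespace, and its pod should be Running before the node reports Ready (sample output; the pod name suffix will differ):

[root@master ~]# kubectl get pods -n kube-flannel
NAME                    READY   STATUS    RESTARTS   AGE
kube-flannel-ds-7h2rv   1/1     Running   0          2m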

[root@master ~]# kubectl get pod -n kube-system
NAME                                          READY   STATUS    RESTARTS   AGE
coredns-d893dcc86f-cb74l                      1/1     Running   0          11m
coredns-d893dcc86f-gvvxv                      1/1     Running   0          11m
etcd-master.example.com                       1/1     Running   0          11m
kube-apiserver-master.example.com             1/1     Running   0          11m
kube-controller-manager-master.example.com    1/1     Running   0          11m
kube-proxy-gjf6g                              1/1     Running   0          11m
kube-scheduler-master.example.com             1/1     Running   0          11m

Join the nodes to the k8s cluster

[root@node1 ~]# kubeadm join 192.168.100.100:6443 --token hs3pb5.b3qf8dso2n31grrf \
> --discovery-token-ca-cert-hash sha256:5efc0e46a7fbb33dd1cb61e3887208d66329fdb229cf0b59b8b8f72900d9c54f 

[root@node2 ~]# kubeadm join 192.168.100.100:6443 --token hs3pb5.b3qf8dso2n31grrf \
> --discovery-token-ca-cert-hash sha256:5efc0e46a7fbb33dd1cb61e3887208d66329fdb229cf0b59b8b8f72900d9c54f 
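
The bootstrap token printed by kubeadm init is valid for 24 hours; if a node is joined later, a fresh join command can be generated on the master:

[root@master ~]# kubeadm token create --print-join-command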

[root@master ~]# kubectl get nodes 
NAME                 STATUS   ROLES           AGE    VERSION
master.example.com   Ready    control-plane   15m    v1.25.4
node1.example.com    Ready    <none>          103s   v1.25.4
node2.example.com    Ready    <none>          100s   v1.25.4

Create a pod on the cluster running an nginx container

[root@master ~]# kubectl create  deployment  nginx  --image nginx
deployment.apps/nginx created
[root@master ~]# kubectl get pods
NAME                    READY   STATUS    RESTARTS   AGE
nginx-94e5c9b8c-bslt7   1/1     Running   0          57s
[root@master ~]# kubectl get pods -o wide
NAME                    READY   STATUS    RESTARTS   AGE   IP           NODE                NOMINATED NODE   READINESS GATES
nginx-94e5c9b8c-bslt7   1/1     Running   0          71s   10.244.2.2   node2.example.com   <none>           <none>
[root@master ~]# kubectl get services
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   18m
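
The pod IP used below is only reachable from inside the cluster network; to reach nginx from outside, the deployment could be exposed as a NodePort service (a sketch; the assigned node port will vary):

[root@master ~]# kubectl expose deployment nginx --port=80 --type=NodePort
service/nginx exposed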

Test

[root@master ~]# curl 10.244.2.2:80
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>
