Kubernetes

Quick Kubernetes Deployment

  • Install Docker and kubeadm on all nodes
  • Deploy the Kubernetes Master
  • Deploy a container network plugin
  • Deploy the Kubernetes Nodes and join them to the cluster
  • Deploy the Dashboard web UI to view Kubernetes resources visually

Prepare the environment:

//Perform all of the following operations on every host
//Disable the firewall and SELinux on all hosts
[root@k8s-master ~]# systemctl disable --now firewalld
Removed /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@k8s-master ~]# setenforce 0
[root@k8s-master ~]# vim /etc/selinux/config 
[root@k8s-node1 ~]# systemctl disable --now firewalld
Removed /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@k8s-node1 ~]# setenforce 0
[root@k8s-node1 ~]# vim /etc/selinux/config 
[root@k8s-node2 ~]# systemctl disable --now firewalld
Removed /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@k8s-node2 ~]# setenforce 0
[root@k8s-node2 ~]# vim /etc/selinux/config 
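The manual edit of /etc/selinux/config above can also be scripted; a minimal sketch, assuming the file still contains the stock SELINUX=enforcing line:

```shell
# Set SELinux to permissive now and disabled after reboot (run on every host)
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config
grep '^SELINUX=' /etc/selinux/config    # should now print SELINUX=disabled
```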
//Disable the swap partition on all hosts:
# vim /etc/fstab
//comment out the swap line
[root@k8s-master ~]# free -m
              total        used        free      shared  buff/cache   available
Mem:           3752         556        2656          10         539        2956
Swap:          4051           0        4051
[root@k8s-master ~]# vim /etc/fstab 
[root@k8s-node1 ~]# free -m
              total        used        free      shared  buff/cache   available
Mem:           1800         550         728          10         521        1084
Swap:          2047           0        2047
[root@k8s-node1 ~]# vim /etc/fstab 
[root@k8s-node2 ~]# free -m
              total        used        free      shared  buff/cache   available
Mem:           1800         559         711          10         529        1072
Swap:          2047           0        2047
[root@k8s-node2 ~]# vim /etc/fstab 
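Instead of editing /etc/fstab by hand, the same change can be scripted; a sketch, assuming the swap entry is an uncommented line containing the word swap:

```shell
# Turn swap off immediately and comment out its fstab entry so it stays off
swapoff -a
sed -ri 's/^([^#].*\sswap\s.*)$/#\1/' /etc/fstab
free -m    # the Swap line should now show 0 total
```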
//Add host entries to /etc/hosts (shown here on the master and on both nodes):
[root@k8s-master ~]# cat >> /etc/hosts << EOF
192.168.70.134 k8s-master
192.168.70.138 k8s-node1
192.168.70.139 k8s-node2
EOF
[root@k8s-node1 ~]# cat >> /etc/hosts << EOF
> 192.168.70.134 k8s-master
> 192.168.70.138 k8s-node1
> 192.168.70.139 k8s-node2
> EOF
[root@k8s-node2 ~]# cat >> /etc/hosts << EOF
> 192.168.70.134 k8s-master
> 192.168.70.138 k8s-node1
> 192.168.70.139 k8s-node2
> EOF
[root@k8s-master ~]# ping k8s-master      //test name resolution
PING k8s-master (192.168.70.134) 56(84) bytes of data.
64 bytes from k8s-master (192.168.70.134): icmp_seq=1 ttl=64 time=0.072 ms
64 bytes from k8s-master (192.168.70.134): icmp_seq=2 ttl=64 time=0.080 ms
^C
--- k8s-master ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 41ms
rtt min/avg/max/mdev = 0.072/0.076/0.080/0.004 ms
[root@k8s-master ~]# ping k8s-node1
PING k8s-node1 (192.168.70.138) 56(84) bytes of data.
64 bytes from k8s-node1 (192.168.70.138): icmp_seq=1 ttl=64 time=0.512 ms
64 bytes from k8s-node1 (192.168.70.138): icmp_seq=2 ttl=64 time=0.285 ms
^C
[root@k8s-master ~]# ping k8s-node2
PING k8s-node2 (192.168.70.139) 56(84) bytes of data.
64 bytes from k8s-node2 (192.168.70.139): icmp_seq=1 ttl=64 time=1.60 ms
64 bytes from k8s-node2 (192.168.70.139): icmp_seq=2 ttl=64 time=0.782 ms
64 bytes from k8s-node2 (192.168.70.139): icmp_seq=3 ttl=64 time=1.32 ms

//Pass bridged IPv4 traffic to the iptables chains:
[root@k8s-master ~]#  cat > /etc/sysctl.d/k8s.conf << EOF
> net.bridge.bridge-nf-call-ip6tables = 1
> net.bridge.bridge-nf-call-iptables = 1
> EOF
[root@k8s-master ~]# sysctl --system   //apply the settings
#output abbreviated
* Applying /etc/sysctl.d/99-sysctl.conf ...
* Applying /etc/sysctl.d/k8s.conf ...    //seeing this line means the file was applied
* Applying /etc/sysctl.conf ...
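These net.bridge.* keys only exist while the br_netfilter kernel module is loaded, so it is worth loading it explicitly on every host; a sketch using the standard modules-load.d mechanism:

```shell
# Load br_netfilter now and on every boot, then re-apply the sysctls
modprobe br_netfilter
cat > /etc/modules-load.d/k8s.conf << EOF
br_netfilter
EOF
lsmod | grep br_netfilter    # confirm the module is loaded
sysctl --system
```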
//Time synchronization, on all hosts
[root@k8s-master ~]# vim /etc/chrony.conf
# Use public servers from the pool.ntp.org project.
# Please consider joining the pool (http://www.pool.ntp.org/join.html).
pool time1.aliyun.com iburst       //use Alibaba Cloud's NTP server
[root@k8s-master ~]# systemctl enable chronyd
[root@k8s-master ~]# systemctl restart chronyd
[root@k8s-master ~]# systemctl status chronyd
● chronyd.service - NTP client/server
   Loaded: loaded (/usr/lib/systemd/system/chronyd.service; enabled; vendor preset: enab>
   Active: active (running) since Tue 2022-09-06 15:54:27 CST; 9s ago
[root@k8s-node1 ~]# vim /etc/chrony.conf 
[root@k8s-node1 ~]# systemctl enable chronyd
[root@k8s-node1 ~]# systemctl restart chronyd
[root@k8s-node1 ~]# systemctl status chronyd
● chronyd.service - NTP client/server
   Loaded: loaded (/usr/lib/systemd/system/chronyd.service; enabled; vendor preset: enab>
   Active: active (running) since Tue 2022-09-06 15:57:52 CST; 8s ago
[root@k8s-node2 ~]# vim /etc/chrony.conf 
[root@k8s-node2 ~]# systemctl enable chronyd
[root@k8s-node2 ~]# systemctl restart chronyd
[root@k8s-node2 ~]# systemctl status chronyd
● chronyd.service - NTP client/server
   Loaded: loaded (/usr/lib/systemd/system/chronyd.service; enabled; vendor preset: enab>
   Active: active (running) since Tue 2022-09-06
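After restarting chronyd on each host, a quick check that the Alibaba Cloud server is actually being tracked; chronyc ships with the chrony package:

```shell
# sources lists the configured servers; tracking shows the one currently selected
chronyc sources -v
chronyc tracking
```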
//Configure passwordless SSH login
[root@k8s-master ~]# ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa): 
Created directory '/root/.ssh'.
Enter passphrase (empty for no passphrase): 
Enter same passphrase again: 
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:LZeVhmrafNhs4eAGG8dNQltVYcGX/sXbKj/dPzR/wNo root@k8s-master
The key's randomart image is:
+---[RSA 3072]----+
|        . ...o=o.|
|       . o . o...|
|        o o + .o |
|       . * +   .o|
|      o S *  .  =|
|       @ O .  o+o|
|      o * *  o.++|
|       . o  o E.=|
|             o..=|
+----[SHA256]-----+
[root@k8s-master ~]# ssh-copy-id k8s-master
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'k8s-master (192.168.70.134)' can't be established.
ECDSA key fingerprint is SHA256:1x2Tw0BYQrGTk7wpwsIy+TtFN72hWbHYYiU6WtI/Ojk.
Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@k8s-master's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'k8s-master'"
and check to make sure that only the key(s) you wanted were added.

[root@k8s-master ~]# ssh-copy-id k8s-node1
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'k8s-node1 (192.168.70.138)' can't be established.
ECDSA key fingerprint is SHA256:75svPGZTNSPdFX6K4lCDkoQfG10Y478mu0NzQD7HpnA.
Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@k8s-node1's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'k8s-node1'"
and check to make sure that only the key(s) you wanted were added.

[root@k8s-master ~]# ssh-copy-id k8s-node2
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'k8s-node2 (192.168.70.139)' can't be established.
ECDSA key fingerprint is SHA256:75svPGZTNSPdFX6K4lCDkoQfG10Y478mu0NzQD7HpnA.
Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@k8s-node2's password: 

Number of key(s) added: 1

Now try logging into the machine, with:   "ssh 'k8s-node2'"
and check to make sure that only the key(s) you wanted were added.

[root@k8s-master ~]# ssh k8s-master
Activate the web console with: systemctl enable --now cockpit.socket

This system is not registered to Red Hat Insights. See https://cloud.redhat.com/
To register this system, run: insights-client --register

Last login: Tue Sep  6 15:10:17 2022 from 192.168.70.1
[root@k8s-master ~]# ssh k8s-node1
Activate the web console with: systemctl enable --now cockpit.socket

This system is not registered to Red Hat Insights. See https://cloud.redhat.com/
To register this system, run: insights-client --register

Last login: Tue Sep  6 15:10:18 2022 from 192.168.70.1
[root@k8s-node1 ~]# exit
logout
Connection to k8s-node1 closed.
[root@k8s-master ~]# ssh k8s-node2
Activate the web console with: systemctl enable --now cockpit.socket

This system is not registered to Red Hat Insights. See https://cloud.redhat.com/
To register this system, run: insights-client --register

Last login: Tue Sep  6 15:10:18 2022 from 192.168.70.1
[root@k8s-node2 ~]# exit
logout
Connection to k8s-node2 closed.
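Before rebooting, a quick loop confirms key-based login works to every host; a sketch using the hostnames added to /etc/hosts above:

```shell
# Each host should print its hostname with no password prompt
for h in k8s-master k8s-node1 k8s-node2; do
    ssh "$h" hostname
done
```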
[root@k8s-master ~]# reboot   //reboot so the SELinux and swap changes made earlier take effect permanently
[root@k8s-node1 ~]# reboot
[root@k8s-node2 ~]# reboot
#//After the reboot, check that the firewall, SELinux, and swap are all still disabled

Install Docker/kubeadm/kubelet on All Nodes

Install Docker first. Note that since Kubernetes 1.24 the dockershim has been removed, so a 1.25 cluster actually uses containerd (installed alongside docker-ce as containerd.io) as the CRI; containerd is configured explicitly further below.

Install Docker

##Note: the Docker version must be identical on every node; use dnf list all|grep docker to confirm that docker-ce.x86_64 shows the same version everywhere
#Perform the following operations on all nodes
[root@k8s-master yum.repos.d]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo      //download the Docker CE repo file
//output omitted

[root@k8s-master ~]# dnf list all|grep docker
containerd.io.x86_64                                              1.6.8-3.1.el8                                          @docker-ce-stable
docker-ce.x86_64                                                  3:20.10.17-3.el8                                       @docker-ce-stable     //this one
docker-ce-cli.x86_64                                              1:20.10.17-3.el8                                       @docker-ce-stable

[root@k8s-master ~]# dnf -y install docker-ce --allowerasing      //--allowerasing (replace conflicting packages) is normally not needed; my repos have a conflict, so I add it here
[root@k8s-master ~]# systemctl enable --now docker
Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /usr/lib/systemd/system/docker.service.
[root@k8s-master ~]# docker version 
Client: Docker Engine - Community
 Version:           20.10.17      //the version must match on all nodes
 API version:       1.41
 Go version:        go1.17.11
 Git commit:        100c701
 Built:             Mon Jun  6 23:03:11 2022
 OS/Arch:           linux/amd64
 Context:           default
 Experimental:      true

Server: Docker Engine - Community
 Engine:
  Version:          20.10.17
  API version:      1.41 (minimum version 1.12)
  Go version:       go1.17.11
  Git commit:       a89b842
  Built:            Mon Jun  6 23:01:29 2022
  OS/Arch:          linux/amd64
  Experimental:     false
 containerd:
  Version:          1.6.8
  GitCommit:        9cd3357b7fd7218e4aec3eae239db1f68a5a6ec6
 runc:
  Version:          1.1.4
  GitCommit:        v1.1.4-0-g5fd4c4d
 docker-init:
  Version:          0.19.0
  GitCommit:        de40ad0
[root@k8s-master ~]# 
//Configure the registry mirror and cgroup driver
[root@k8s-master ~]# cat > /etc/docker/daemon.json << EOF
> {
>   "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"],
>   "exec-opts": ["native.cgroupdriver=systemd"],
>   "log-driver": "json-file",
>   "log-opts": {
>     "max-size": "100m"
>   },
>   "storage-driver": "overlay2"
> }
> EOF
[root@k8s-master ~]# cat /etc/docker/daemon.json 
{
  "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"],  //加速器
  "exec-opts": ["native.cgroupdriver=systemd"],   //驱动
  "log-driver": "json-file",   //格式
  "log-opts": {
    "max-size": "100m"    //100m开始运行
  },
  "storage-driver": "overlay2"    //存储驱动
}
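The new daemon.json is not picked up until Docker is restarted; a sketch that restarts the daemon and verifies the cgroup driver and mirror took effect:

```shell
# Restart Docker and confirm the systemd cgroup driver and registry mirror are active
systemctl daemon-reload
systemctl restart docker
docker info | grep -iE -A1 'cgroup driver|registry mirrors'
```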

Add the Kubernetes Alibaba Cloud YUM Repository

#Configure the following on all nodes
[root@k8s-master ~]#  cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
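A quick sanity check that the new repository is visible before installing (a sketch):

```shell
# Refresh metadata and confirm the kubernetes repo is enabled
dnf makecache
dnf repolist | grep -i kubernetes
```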


Install kubeadm, kubelet, and kubectl

#Perform the following on all nodes
[root@k8s-master ~]# dnf list all|grep kubelet   //check; the version must be the same on every node
kubelet.x86_64                                                    1.25.0-0                                               kubernetes       
[root@k8s-master ~]# dnf list all|grep kubeadm
kubeadm.x86_64                                                    1.25.0-0                                               kubernetes       
[root@k8s-master ~]# dnf list all|grep kubectl
kubectl.x86_64                                                    1.25.0-0                                               kubernetes       
[root@k8s-master ~]# dnf -y install kubelet kubeadm kubectl
[root@k8s-master ~]# systemctl enable kubelet      //enable only; kubelet cannot start successfully until the cluster is initialized
Created symlink /etc/systemd/system/multi-user.target.wants/kubelet.service → /usr/lib/systemd/system/kubelet.service.
[root@k8s-master ~]# systemctl status kubelet
● kubelet.service - kubelet: The Kubernetes Node Agent
   Loaded: loaded (/usr/lib/systemd/system/kubelet.service; enabled; vendor>
  Drop-In: /usr/lib/systemd/system/kubelet.service.d

Deploy the Kubernetes Master

Run the following on 192.168.70.134 (the master)

[root@k8s-master ~]# kubeadm init -h     //view the help text
[root@k8s-master ~]# cd /etc/containerd/
[root@k8s-master containerd]# containerd config default > config.toml    //generate the default containerd config
[root@k8s-master containerd]# vim config.toml 
sandbox_image = "k8s.gcr.io/pause:3.6"     //change to  sandbox_image = "registry.cn-beijing.aliyuncs.com/abcdocker/pause:3.6"
[root@k8s-master manifests]# systemctl stop kubelet
[root@k8s-master manifests]# systemctl restart containerd
[root@k8s-master manifests]# kubeadm init --apiserver-advertise-address 192.168.70.134 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.25.0 --service-cidr 10.96.0.0/12 --pod-network-cidr 10.244.0.0/16
//output omitted
//apart from 192.168.70.134, the rest of the options are essentially boilerplate
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube      //regular (non-root) users run these commands
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf     //the root user runs this instead

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:        //要配置这个网络插件,在github.com ,上搜索flannel
  https://kubernetes.io/docs/concepts/cluster-administration/addons/       

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.70.134:6443 --token h9utko.9esdw3ge9j0urwae \
        --discovery-token-ca-cert-hash sha256:8c36d378e51b8d01f1fe904e51e1b5d7215fc76dcbaf105c798c4cda70e84ca1 

  //Seeing this join command means the initialization succeeded
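The kubeadm join command printed above is what actually adds the workers; run it as root on each node, using the token and hash from your own init output (the values below are from this run and will differ on your cluster):

```shell
# On k8s-node1 and then k8s-node2
kubeadm join 192.168.70.134:6443 --token h9utko.9esdw3ge9j0urwae \
        --discovery-token-ca-cert-hash sha256:8c36d378e51b8d01f1fe904e51e1b5d7215fc76dcbaf105c798c4cda70e84ca1
```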

//Make the KUBECONFIG environment variable permanent (root approach)
[root@k8s-master ~]# vim /etc/profile.d/k8s.sh
[root@k8s-master ~]# cat /etc/profile.d/k8s.sh
export KUBECONFIG=/etc/kubernetes/admin.conf
[root@k8s-master ~]# source /etc/profile.d/k8s.sh 
[root@k8s-master ~]# echo $KUBECONFIG
/etc/kubernetes/admin.conf

[root@k8s-master ~]# kubectl get nodes
NAME         STATUS     ROLES           AGE   VERSION
k8s-master   NotReady   control-plane   14m   v1.25.0
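The master reports NotReady until a pod network add-on is deployed. A sketch using flannel; the manifest path below is the one published in the flannel-io/flannel GitHub repository and may change, so check that project's README if the URL does not resolve:

```shell
# Deploy flannel; its default pod CIDR 10.244.0.0/16 matches the --pod-network-cidr used above
kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
kubectl get pods -A | grep flannel    # wait until the flannel pods are Running
kubectl get nodes                     # the master (and any joined nodes) should move to Ready
```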


#  kubectl usage

```shell
[root@master ~]# kubectl get nodes
NAME     STATUS   ROLES                  AGE   VERSION
master   Ready    control-plane,master   36h   v1.20.0
node1    Ready    <none>                 35h   v1.20.0
node2    Ready    <none>                 35h   v1.20.0
[root@master ~]# kubectl get pods -o wide
NAME                      READY   STATUS             RESTARTS   AGE   IP           NODE    NOMINATED NODE   READINESS GATES
apache-594d8494b4-s6dfd   0/1     ImagePullBackOff   0          19m   10.244.1.3   node1   <none>           <none>
nginx-6799fc88d8-nbgc2    1/1     Running            0          35h   10.244.1.2   node1   <none>     
[root@master ~]# kubectl create deployment deploy-web --image=itlang11:v0.1 --replicas=3
deployment.apps/deploy-web created


[root@master ~]# kubectl get pods
NAME                          READY   STATUS              RESTARTS   AGE
apache-594d8494b4-s6dfd       0/1     ImagePullBackOff    0          26m
deploy-web-79df5694b9-2sh6d   0/1     ImagePullBackOff    0          64s
deploy-web-79df5694b9-5lvqk   0/1     ImagePullBackOff    0          64s
[root@master ~]# kubectl delete deployment deploy-web
deployment.apps "deploy-web" deleted
[root@master ~]# kubectl get pods
NAME                          READY   STATUS             RESTARTS   AGE
apache-594d8494b4-s6dfd       0/1     ImagePullBackOff   0          29m
deploy-web-79df5694b9-2sh6d   0/1     Terminating        0          3m25s
deploy-web-79df5694b9-5lvqk   0/1     Terminating        0          3m25s
[root@master ~]# kubectl get pods
NAME                      READY   STATUS             RESTARTS   AGE
apache-594d8494b4-s6dfd   0/1     ImagePullBackOff   0          29m
nginx-6799fc88d8-nbgc2    1/1     Running            0          35h
[root@master ~]# kubectl get pods -w
```
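The ImagePullBackOff pods in the transcript above usually mean the image name or registry is wrong; a sketch for digging into the cause, using the pod name shown in this output:

```shell
# The events at the bottom of describe explain why the image pull is failing
kubectl describe pod apache-594d8494b4-s6dfd | tail -n 20
kubectl get events --sort-by=.metadata.creationTimestamp | tail -n 10
```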