I. Lab requirements:
Use a CentOS 7 (or later) image; CentOS 7.4 (4.21 GB) is recommended.
Download link and extraction code:
Link: https://pan.baidu.com/s/1ENVgs7jM-sXC_RuvNX5yaw    extraction code: zp43
II. Virtual machines used:
192.168.31.61 k8s-master
192.168.31.62 k8s-node1
192.168.31.63 k8s-node2
III. Procedure:
1. Stop the firewall on all nodes and disable it at boot (unless noted otherwise, run the following steps on every node):
[root@k8s-master ~]# systemctl stop firewalld.service
[root@k8s-master ~]# systemctl disable firewalld.service
2. Permanently disable SELinux:
[root@k8s-master ~]# sed -i 's/enforcing/disabled/' /etc/selinux/config
[root@k8s-master ~]# cat /etc/selinux/config
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# disabled - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of disabled.
# disabled - No SELinux policy is loaded.
SELINUX=disabled
# SELINUXTYPE= can take one of three values:
# targeted - Targeted processes are protected,
# minimum - Modification of targeted policy. Only selected processes are protected.
# mls - Multi Level Security protection.
SELINUXTYPE=targeted
[root@k8s-master ~]#
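The change to /etc/selinux/config only takes effect after a reboot. To also switch SELinux off for the current session without rebooting (an optional extra step, not part of the original transcript), you can run:
[root@k8s-master ~]# setenforce 0   # puts the running kernel into permissive mode; the config change above disables SELinux permanently after reboot
[root@k8s-master ~]# getenforce     # should now report Permissive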
3. Disable the swap partition:
[root@k8s-master ~]# swapoff -a
Then open /etc/fstab and comment out (or delete) the swap entry on the last line:
[root@k8s-master ~]# cat /etc/fstab
#
# /etc/fstab
# Created by anaconda on Mon Mar 23 19:36:39 2020
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/centos-root / xfs defaults 0 0
UUID=ecb13695-25d2-4a0d-a277-93db5ec1ef39 /boot xfs defaults 0 0
#/dev/mapper/centos-swap swap swap defaults 0 0
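Instead of editing /etc/fstab by hand, the swap line can be commented out with a single sed command (a convenience sketch, assuming the swap entry is the /dev/mapper/centos-swap line shown above):
[root@k8s-master ~]# sed -ri 's/.*swap.*/#&/' /etc/fstab   # prefix every line containing "swap" with #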
Run:
[root@k8s-master ~]# echo vm.swappiness=0 >> /etc/sysctl.conf
Check memory after a reboot:
[root@k8s-master ~]# free -m
total used free shared buff/cache available
Mem: 1823 338 756 8 728 1252
Swap: 0 0 0
4. Set the hostname and start a new bash shell so it takes effect (run on the corresponding node):
[root@k8s-master ~]# hostnamectl set-hostname k8s-master    (run the equivalent command on node1 and node2 with their own hostnames; see below)
[root@k8s-master ~]# bash
[root@k8s-master ~]#
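On the worker nodes the corresponding commands are:
[root@k8s-node1 ~]# hostnamectl set-hostname k8s-node1
[root@k8s-node1 ~]# bash
[root@k8s-node2 ~]# hostnamectl set-hostname k8s-node2
[root@k8s-node2 ~]# bash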
5. Add hosts entries on all nodes:
[root@k8s-master ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.31.61 k8s-master
192.168.31.62 k8s-node1
192.168.31.63 k8s-node2
[root@k8s-master ~]#
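To append the same entries on node1 and node2 without opening an editor, a heredoc works as well:
[root@k8s-master ~]# cat >> /etc/hosts << EOF
192.168.31.61 k8s-master
192.168.31.62 k8s-node1
192.168.31.63 k8s-node2
EOF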
6. Pass bridged IPv4 traffic to iptables:
[root@k8s-master ~]# cat >> /etc/sysctl.d/k8s.conf << EOF
> net.bridge.bridge-nf-call-ip6tables = 1
> net.bridge.bridge-nf-call-iptables = 1
> EOF
[root@k8s-master ~]# cat /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
[root@k8s-master ~]#
Apply the settings:
[root@k8s-master ~]# sysctl --system
* Applying /usr/lib/sysctl.d/00-system.conf ...
* Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
kernel.yama.ptrace_scope = 0
* Applying /usr/lib/sysctl.d/50-default.conf ...
kernel.sysrq = 16
kernel.core_uses_pid = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.promote_secondaries = 1
net.ipv4.conf.all.promote_secondaries = 1
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /etc/sysctl.d/99-sysctl.conf ...
* Applying /etc/sysctl.d/k8s.conf ...
* Applying /etc/sysctl.conf ...
[root@k8s-master ~]#
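If sysctl --system reports that the net.bridge.bridge-nf-call-* keys cannot be found, the br_netfilter kernel module is probably not loaded yet; a common fix (not shown in the original transcript) is:
[root@k8s-master ~]# modprobe br_netfilter                                      # load the module now
[root@k8s-master ~]# echo br_netfilter > /etc/modules-load.d/br_netfilter.conf  # load it automatically on boot
[root@k8s-master ~]# sysctl --system                                            # re-apply the settings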
7. Time synchronization:
[root@k8s-master ~]# yum install -y ntpdate
已加载插件:fastestmirror
base | 3.6 kB 00:00:00
extras | 2.9 kB 00:00:00
updates | 2.9 kB 00:00:00
(1/2): extras/7/x86_64/primary_db | 165 kB 00:00:00
(2/2): updates/7/x86_64/primary_db | 7.6 MB 00:00:01
Determining fastest mirrors
* base: mirror.lzu.edu.cn
* extras: mirror.lzu.edu.cn
* updates: mirror.lzu.edu.cn
正在解决依赖关系
--> 正在检查事务
---> 软件包 ntpdate.x86_64.0.4.2.6p5-29.el7.centos 将被 安装
--> 解决依赖关系完成
依赖关系解决
====================================================================================================
Package 架构 版本 源 大小
====================================================================================================
正在安装:
ntpdate x86_64 4.2.6p5-29.el7.centos base 86 k
事务概要
====================================================================================================
安装 1 软件包
总下载量:86 k
安装大小:121 k
Downloading packages:
ntpdate-4.2.6p5-29.el7.centos.x86_64.rpm | 86 kB 00:00:00
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
正在安装 : ntpdate-4.2.6p5-29.el7.centos.x86_64 1/1
验证中 : ntpdate-4.2.6p5-29.el7.centos.x86_64 1/1
已安装:
ntpdate.x86_64 0:4.2.6p5-29.el7.centos
完毕!
[root@k8s-master ~]# ntpdate time.windows.com
16 Apr 13:29:35 ntpdate[12311]: adjust time server 40.81.188.85 offset 0.002105 sec
[root@k8s-master ~]# cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
cp: "/usr/share/zoneinfo/Asia/Shanghai" 与"/etc/localtime" 为同一文件
[root@k8s-master ~]#
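ntpdate only syncs the clock once. To keep the nodes in sync afterwards, you can optionally add a cron job on every node (an assumption, not part of the original steps):
[root@k8s-master ~]# echo '*/30 * * * * /usr/sbin/ntpdate time.windows.com >/dev/null 2>&1' >> /var/spool/cron/root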
8. Install Docker. Version 18.06.1 is recommended; the newest releases have more bugs with this setup:
[root@k8s-master ~]# wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
[root@k8s-master ~]# cd /etc/yum.repos.d
[root@k8s-master yum.repos.d]# ll
总用量 32
-rw-r--r--. 1 root root 1664 8月 30 2017 CentOS-Base.repo
-rw-r--r--. 1 root root 1309 8月 30 2017 CentOS-CR.repo
-rw-r--r--. 1 root root 649 8月 30 2017 CentOS-Debuginfo.repo
-rw-r--r--. 1 root root 314 8月 30 2017 CentOS-fasttrack.repo
-rw-r--r--. 1 root root 630 8月 30 2017 CentOS-Media.repo
-rw-r--r--. 1 root root 1331 8月 30 2017 CentOS-Sources.repo
-rw-r--r--. 1 root root 3830 8月 30 2017 CentOS-Vault.repo
-rw-r--r-- 1 root root 374 4月 16 13:41 docker-ce.repo
If the download fails, you can download the repo file I uploaded and use it directly:
Link: https://pan.baidu.com/s/1VqRXGj1rC0fKjvQW5yg0ow    extraction code: 543k
Before installing, remove any previously installed Docker version:
[root@k8s-master ~]# systemctl stop docker
[root@k8s-master ~]# yum -y remove docker-ce docker-ce-cli containerd.io
[root@k8s-master ~]# rm -rf /var/lib/docker
[root@k8s-master ~]# yum install -y gcc
[root@k8s-master ~]# yum install -y gcc-c++
[root@k8s-master ~]# yum remove docker \
docker-client \
docker-client-latest \
docker-common \
docker-latest \
docker-latest-logrotate \
docker-logrotate \
docker-selinux \
docker-engine-selinux \
docker-engine
[root@k8s-master ~]# yum -y install docker-ce-18.06.1.ce-3.el7
已加载插件:fastestmirror
docker-ce-stable | 3.5 kB 00:00:00
(1/2): docker-ce-stable/x86_64/primary_db | 41 kB 00:00:00
(2/2): docker-ce-stable/x86_64/updateinfo | 55 B 00:00:00
Loading mirror speeds from cached hostfile
* base: mirror.lzu.edu.cn
* extras: mirror.lzu.edu.cn
* updates: mirror.lzu.edu.cn
正在解决依赖关系
--> 正在检查事务
---> 软件包 docker-ce.x86_64.0.18.06.1.ce-3.el7 将被 安装
--> 正在处理依赖关系 container-selinux >= 2.9,它被软件包 docker-ce-18.06.1.ce-3.el7.x86_64 需要
--> 正在处理依赖关系 libcgroup,它被软件包 docker-ce-18.06.1.ce-3.el7.x86_64 需要
--> 正在检查事务
---> 软件包 container-selinux.noarch.2.2.107-3.el7 将被 安装
--> 正在处理依赖关系 selinux-policy-targeted >= 3.13.1-216.el7,它被软件包 2:container-selinux-2.107-3.el7.noarch 需要
--> 正在处理依赖关系 selinux-policy-base >= 3.13.1-216.el7,它被软件包 2:container-selinux-2.107-3.el7.noarch 需要
--> 正在处理依赖关系 selinux-policy >= 3.13.1-216.el7,它被软件包 2:container-selinux-2.107-3.el7.noarch 需要
--> 正在处理依赖关系 policycoreutils-python,它被软件包 2:container-selinux-2.107-3.el7.noarch 需要
---> 软件包 libcgroup.x86_64.0.0.41-21.el7 将被 安装
--> 正在检查事务
---> 软件包 policycoreutils-python.x86_64.0.2.5-33.el7 将被 安装
--> 正在处理依赖关系 policycoreutils = 2.5-33.el7,它被软件包 policycoreutils-python-2.5-33.el7.x86_64 需要
--> 正在处理依赖关系 setools-libs >= 3.3.8-4,它被软件包 policycoreutils-python-2.5-33.el7.x86_64 需要
--> 正在处理依赖关系 libsemanage-python >= 2.5-14,它被软件包 policycoreutils-python-2.5-33.el7.x86_64 需要
--> 正在处理依赖关系 audit-libs-python >= 2.1.3-4,它被软件包 policycoreutils-python-2.5-33.el7.x86_64 需要
--> 正在处理依赖关系 python-IPy,它被软件包 policycoreutils-python-2.5-33.el7.x86_64 需要
--> 正在处理依赖关系 libqpol.so.1(VERS_1.4)(64bit),它被软件包 policycoreutils-python-2.5-33.el7.x86_64 需要
--> 正在处理依赖关系 libqpol.so.1(VERS_1.2)(64bit),它被软件包 policycoreutils-python-2.5-33.el7.x86_64 需要
--> 正在处理依赖关系 libapol.so.4(VERS_4.0)(64bit),它被软件包 policycoreutils-python-2.5-33.el7.x86_64 需要
--> 正在处理依赖关系 checkpolicy,它被软件包 policycoreutils-python-2.5-33.el7.x86_64 需要
--> 正在处理依赖关系 libqpol.so.1()(64bit),它被软件包 policycoreutils-python-2.5-33.el7.x86_64 需要
--> 正在处理依赖关系 libapol.so.4()(64bit),它被软件包 policycoreutils-python-2.5-33.el7.x86_64 需要
---> 软件包 selinux-policy.noarch.0.3.13.1-166.el7 将被 升级
---> 软件包 selinux-policy.noarch.0.3.13.1-252.el7_7.6 将被 更新
--> 正在处理依赖关系 libsemanage >= 2.5-13,它被软件包 selinux-policy-3.13.1-252.el7_7.6.noarch 需要
---> 软件包 selinux-policy-targeted.noarch.0.3.13.1-166.el7 将被 升级
---> 软件包 selinux-policy-targeted.noarch.0.3.13.1-252.el7_7.6 将被 更新
--> 正在检查事务
---> 软件包 audit-libs-python.x86_64.0.2.8.5-4.el7 将被 安装
--> 正在处理依赖关系 audit-libs(x86-64) = 2.8.5-4.el7,它被软件包 audit-libs-python-2.8.5-4.el7.x86_64 需要
---> 软件包 checkpolicy.x86_64.0.2.5-8.el7 将被 安装
---> 软件包 libsemanage.x86_64.0.2.5-8.el7 将被 升级
---> 软件包 libsemanage.x86_64.0.2.5-14.el7 将被 更新
--> 正在处理依赖关系 libsepol >= 2.5-10,它被软件包 libsemanage-2.5-14.el7.x86_64 需要
--> 正在处理依赖关系 libselinux >= 2.5-14,它被软件包 libsemanage-2.5-14.el7.x86_64 需要
---> 软件包 libsemanage-python.x86_64.0.2.5-14.el7 将被 安装
---> 软件包 policycoreutils.x86_64.0.2.5-17.1.el7 将被 升级
---> 软件包 policycoreutils.x86_64.0.2.5-33.el7 将被 更新
--> 正在处理依赖关系 libselinux-utils >= 2.5-14,它被软件包 policycoreutils-2.5-33.el7.x86_64 需要
---> 软件包 python-IPy.noarch.0.0.75-6.el7 将被 安装
---> 软件包 setools-libs.x86_64.0.3.3.8-4.el7 将被 安装
--> 正在检查事务
---> 软件包 audit-libs.x86_64.0.2.7.6-3.el7 将被 升级
--> 正在处理依赖关系 audit-libs(x86-64) = 2.7.6-3.el7,它被软件包 audit-2.7.6-3.el7.x86_64 需要
---> 软件包 audit-libs.x86_64.0.2.8.5-4.el7 将被 更新
---> 软件包 libselinux.x86_64.0.2.5-11.el7 将被 升级
--> 正在处理依赖关系 libselinux(x86-64) = 2.5-11.el7,它被软件包 libselinux-python-2.5-11.el7.x86_64 需要
---> 软件包 libselinux.x86_64.0.2.5-14.1.el7 将被 更新
---> 软件包 libselinux-utils.x86_64.0.2.5-11.el7 将被 升级
---> 软件包 libselinux-utils.x86_64.0.2.5-14.1.el7 将被 更新
---> 软件包 libsepol.x86_64.0.2.5-6.el7 将被 升级
---> 软件包 libsepol.x86_64.0.2.5-10.el7 将被 更新
--> 正在检查事务
---> 软件包 audit.x86_64.0.2.7.6-3.el7 将被 升级
---> 软件包 audit.x86_64.0.2.8.5-4.el7 将被 更新
---> 软件包 libselinux-python.x86_64.0.2.5-11.el7 将被 升级
---> 软件包 libselinux-python.x86_64.0.2.5-14.1.el7 将被 更新
--> 解决依赖关系完成
依赖关系解决
====================================================================================================
Package 架构 版本 源 大小
====================================================================================================
正在安装:
docker-ce x86_64 18.06.1.ce-3.el7 docker-ce-stable 41 M
为依赖而安装:
audit-libs-python x86_64 2.8.5-4.el7 base 76 k
checkpolicy x86_64 2.5-8.el7 base 295 k
container-selinux noarch 2:2.107-3.el7 extras 39 k
libcgroup x86_64 0.41-21.el7 base 66 k
libsemanage-python x86_64 2.5-14.el7 base 113 k
policycoreutils-python x86_64 2.5-33.el7 base 457 k
python-IPy noarch 0.75-6.el7 base 32 k
setools-libs x86_64 3.3.8-4.el7 base 620 k
为依赖而更新:
audit x86_64 2.8.5-4.el7 base 256 k
audit-libs x86_64 2.8.5-4.el7 base 102 k
libselinux x86_64 2.5-14.1.el7 base 162 k
libselinux-python x86_64 2.5-14.1.el7 base 235 k
libselinux-utils x86_64 2.5-14.1.el7 base 151 k
libsemanage x86_64 2.5-14.el7 base 151 k
libsepol x86_64 2.5-10.el7 base 297 k
policycoreutils x86_64 2.5-33.el7 base 916 k
selinux-policy noarch 3.13.1-252.el7_7.6 updates 492 k
selinux-policy-targeted noarch 3.13.1-252.el7_7.6 updates 7.0 M
事务概要
====================================================================================================
安装 1 软件包 (+ 8 依赖软件包)
升级 ( 10 依赖软件包)
总下载量:52 M
Downloading packages:
Delta RPMs disabled because /usr/bin/applydeltarpm not installed.
(1/19): audit-2.8.5-4.el7.x86_64.rpm | 256 kB 00:00:02
(2/19): audit-libs-python-2.8.5-4.el7.x86_64.rpm | 76 kB 00:00:03
(3/19): libcgroup-0.41-21.el7.x86_64.rpm | 66 kB 00:00:00
(4/19): libselinux-2.5-14.1.el7.x86_64.rpm | 162 kB 00:00:00
(5/19): checkpolicy-2.5-8.el7.x86_64.rpm | 295 kB 00:00:04
(6/19): libselinux-python-2.5-14.1.el7.x86_64.rpm | 235 kB 00:00:00
(7/19): libsemanage-2.5-14.el7.x86_64.rpm | 151 kB 00:00:00
(8/19): libselinux-utils-2.5-14.1.el7.x86_64.rpm | 151 kB 00:00:00
(9/19): libsemanage-python-2.5-14.el7.x86_64.rpm | 113 kB 00:00:00
(10/19): audit-libs-2.8.5-4.el7.x86_64.rpm | 102 kB 00:00:05
(11/19): policycoreutils-python-2.5-33.el7.x86_64.rpm | 457 kB 00:00:00
(12/19): python-IPy-0.75-6.el7.noarch.rpm | 32 kB 00:00:00
(13/19): libsepol-2.5-10.el7.x86_64.rpm | 297 kB 00:00:00
(14/19): selinux-policy-3.13.1-252.el7_7.6.noarch.rpm | 492 kB 00:00:00
(15/19): container-selinux-2.107-3.el7.noarch.rpm | 39 kB 00:00:05
(16/19): setools-libs-3.3.8-4.el7.x86_64.rpm | 620 kB 00:00:00
(17/19): policycoreutils-2.5-33.el7.x86_64.rpm | 916 kB 00:00:02
(18/19): selinux-policy-targeted-3.13.1-252.el7_7.6.noarch.rpm | 7.0 MB 00:00:05
warning: /var/cache/yum/x86_64/7/docker-ce-stable/packages/docker-ce-18.06.1.ce-3.el7.x86_64.rpm: Header V4 RSA/SHA512 Signature, key ID 621e9f35: NOKEY
docker-ce-18.06.1.ce-3.el7.x86_64.rpm 的公钥尚未安装
(19/19): docker-ce-18.06.1.ce-3.el7.x86_64.rpm | 41 MB 00:00:16
----------------------------------------------------------------------------------------------------
总计 2.6 MB/s | 52 MB 00:00:20
从 https://mirrors.aliyun.com/docker-ce/linux/centos/gpg 检索密钥
导入 GPG key 0x621E9F35:
用户ID : "Docker Release (CE rpm) <docker@docker.com>"
指纹 : 060a 61c5 1b55 8a7f 742b 77aa c52f eb6b 621e 9f35
来自 : https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
正在更新 : libsepol-2.5-10.el7.x86_64 1/29
正在更新 : libselinux-2.5-14.1.el7.x86_64 2/29
正在更新 : audit-libs-2.8.5-4.el7.x86_64 3/29
正在更新 : libsemanage-2.5-14.el7.x86_64 4/29
正在更新 : libselinux-utils-2.5-14.1.el7.x86_64 5/29
正在更新 : policycoreutils-2.5-33.el7.x86_64 6/29
正在更新 : selinux-policy-3.13.1-252.el7_7.6.noarch 7/29
正在安装 : libcgroup-0.41-21.el7.x86_64 8/29
正在更新 : selinux-policy-targeted-3.13.1-252.el7_7.6.noarch 9/29
正在安装 : libsemanage-python-2.5-14.el7.x86_64 10/29
正在安装 : audit-libs-python-2.8.5-4.el7.x86_64 11/29
正在安装 : setools-libs-3.3.8-4.el7.x86_64 12/29
正在更新 : libselinux-python-2.5-14.1.el7.x86_64 13/29
正在安装 : python-IPy-0.75-6.el7.noarch 14/29
正在安装 : checkpolicy-2.5-8.el7.x86_64 15/29
正在安装 : policycoreutils-python-2.5-33.el7.x86_64 16/29
正在安装 : 2:container-selinux-2.107-3.el7.noarch 17/29
setsebool: SELinux is disabled.
正在安装 : docker-ce-18.06.1.ce-3.el7.x86_64 18/29
正在更新 : audit-2.8.5-4.el7.x86_64 19/29
清理 : selinux-policy-targeted-3.13.1-166.el7.noarch 20/29
清理 : selinux-policy-3.13.1-166.el7.noarch 21/29
清理 : policycoreutils-2.5-17.1.el7.x86_64 22/29
清理 : libsemanage-2.5-8.el7.x86_64 23/29
清理 : libselinux-utils-2.5-11.el7.x86_64 24/29
清理 : libselinux-python-2.5-11.el7.x86_64 25/29
清理 : libselinux-2.5-11.el7.x86_64 26/29
清理 : audit-2.7.6-3.el7.x86_64 27/29
清理 : audit-libs-2.7.6-3.el7.x86_64 28/29
清理 : libsepol-2.5-6.el7.x86_64 29/29
验证中 : 2:container-selinux-2.107-3.el7.noarch 1/29
验证中 : policycoreutils-2.5-33.el7.x86_64 2/29
验证中 : audit-libs-2.8.5-4.el7.x86_64 3/29
验证中 : checkpolicy-2.5-8.el7.x86_64 4/29
验证中 : selinux-policy-3.13.1-252.el7_7.6.noarch 5/29
验证中 : python-IPy-0.75-6.el7.noarch 6/29
验证中 : setools-libs-3.3.8-4.el7.x86_64 7/29
验证中 : policycoreutils-python-2.5-33.el7.x86_64 8/29
验证中 : audit-2.8.5-4.el7.x86_64 9/29
验证中 : docker-ce-18.06.1.ce-3.el7.x86_64 10/29
验证中 : libsemanage-python-2.5-14.el7.x86_64 11/29
验证中 : libsemanage-2.5-14.el7.x86_64 12/29
验证中 : libsepol-2.5-10.el7.x86_64 13/29
验证中 : audit-libs-python-2.8.5-4.el7.x86_64 14/29
验证中 : libselinux-python-2.5-14.1.el7.x86_64 15/29
验证中 : libselinux-utils-2.5-14.1.el7.x86_64 16/29
验证中 : selinux-policy-targeted-3.13.1-252.el7_7.6.noarch 17/29
验证中 : libselinux-2.5-14.1.el7.x86_64 18/29
验证中 : libcgroup-0.41-21.el7.x86_64 19/29
验证中 : libselinux-utils-2.5-11.el7.x86_64 20/29
验证中 : libselinux-2.5-11.el7.x86_64 21/29
验证中 : libsepol-2.5-6.el7.x86_64 22/29
验证中 : selinux-policy-3.13.1-166.el7.noarch 23/29
验证中 : audit-libs-2.7.6-3.el7.x86_64 24/29
验证中 : audit-2.7.6-3.el7.x86_64 25/29
验证中 : policycoreutils-2.5-17.1.el7.x86_64 26/29
验证中 : libsemanage-2.5-8.el7.x86_64 27/29
验证中 : libselinux-python-2.5-11.el7.x86_64 28/29
验证中 : selinux-policy-targeted-3.13.1-166.el7.noarch 29/29
已安装:
docker-ce.x86_64 0:18.06.1.ce-3.el7
作为依赖被安装:
audit-libs-python.x86_64 0:2.8.5-4.el7 checkpolicy.x86_64 0:2.5-8.el7
container-selinux.noarch 2:2.107-3.el7 libcgroup.x86_64 0:0.41-21.el7
libsemanage-python.x86_64 0:2.5-14.el7 policycoreutils-python.x86_64 0:2.5-33.el7
python-IPy.noarch 0:0.75-6.el7 setools-libs.x86_64 0:3.3.8-4.el7
作为依赖被升级:
audit.x86_64 0:2.8.5-4.el7 audit-libs.x86_64 0:2.8.5-4.el7
libselinux.x86_64 0:2.5-14.1.el7 libselinux-python.x86_64 0:2.5-14.1.el7
libselinux-utils.x86_64 0:2.5-14.1.el7 libsemanage.x86_64 0:2.5-14.el7
libsepol.x86_64 0:2.5-10.el7 policycoreutils.x86_64 0:2.5-33.el7
selinux-policy.noarch 0:3.13.1-252.el7_7.6 selinux-policy-targeted.noarch 0:3.13.1-252.el7_7.6
完毕!
[root@k8s-master ~]#
If the installation fails with an error like:
Transaction check error:
file /usr/bin/docker from install of docker-ce-18.06.1.ce-3.el7.x86_64 conflicts with file from package docker-ce-cli-1:19.03.8-3.el7.x86_64
file /usr/share/bash-completion/completions/docker from install of docker-ce-18.06.1.ce-3.el7.x86_64 conflicts with file from package docker-ce-cli-1:19.03.8-3.el7.x86_64
first remove the conflicting version named in the error, then install again:
[root@k8s-master ~]# yum erase docker-ce-cli-1:19.03.8-3.el7.x86_64
已加载插件:fastestmirror
正在解决依赖关系
--> 正在检查事务
---> 软件包 docker-ce-cli.x86_64.1.19.03.8-3.el7 将被 删除
--> 解决依赖关系完成
依赖关系解决
===================================================================================================================================================
Package 架构 版本 源 大小
===================================================================================================================================================
正在删除:
docker-ce-cli x86_64 1:19.03.8-3.el7 @docker-ce-stable 169 M
事务概要
===================================================================================================================================================
移除 1 软件包
安装大小:169 M
是否继续?[y/N]:y
Downloading packages:
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
正在删除 : 1:docker-ce-cli-19.03.8-3.el7.x86_64 1/1
验证中 : 1:docker-ce-cli-19.03.8-3.el7.x86_64 1/1
删除:
docker-ce-cli.x86_64 1:19.03.8-3.el7
完毕!
[root@k8s-master ~]# yum -y install docker-ce-18.06.1.ce-3.el7
[root@k8s-master ~]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@k8s-master ~]# systemctl start docker
[root@k8s-master ~]# docker --version
Docker version 18.06.1-ce, build e68fc7a
Configure the Alibaba Cloud image accelerator (registry mirror):
[root@k8s-master ~]# cat > /etc/docker/daemon.json << EOF
{
"registry-mirrors": ["https://l5s01364.mirror.aliyuncs.com"]
}
EOF
[root@k8s-master ~]# systemctl daemon-reload
[root@k8s-master ~]# systemctl restart docker
[root@k8s-master ~]#
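kubeadm will later warn that Docker uses the "cgroupfs" cgroup driver (see the [WARNING IsDockerSystemdCheck] line in step 11). The cluster still works with cgroupfs, but if you prefer the recommended "systemd" driver you can extend daemon.json like this before initializing (optional sketch; restart Docker afterwards):
[root@k8s-master ~]# cat > /etc/docker/daemon.json << EOF
{
  "registry-mirrors": ["https://l5s01364.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
[root@k8s-master ~]# systemctl daemon-reload
[root@k8s-master ~]# systemctl restart docker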
9. Add the Alibaba Cloud Kubernetes yum repository:
[root@k8s-master ~]# cat > /etc/yum.repos.d/kubernetes.repo << EOF
> [kubernetes]
> name=Kubernetes
> baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
> enabled=1
> gpgcheck=0
> repo_gpgcheck=0
> gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
> EOF
[root@k8s-master ~]#
10. Install kubeadm, kubelet, and kubectl:
[root@k8s-master ~]# yum install -y kubelet-1.17.0 kubeadm-1.17.0 kubectl-1.17.0
已加载插件:fastestmirror
kubernetes | 1.4 kB 00:00:00
kubernetes/primary | 66 kB 00:00:00
Loading mirror speeds from cached hostfile
* base: mirror.lzu.edu.cn
* extras: mirror.lzu.edu.cn
* updates: mirror.lzu.edu.cn
kubernetes 484/484
正在解决依赖关系
--> 正在检查事务
---> 软件包 kubeadm.x86_64.0.1.17.0-0 将被 安装
--> 正在处理依赖关系 kubernetes-cni >= 0.7.5,它被软件包 kubeadm-1.17.0-0.x86_64 需要
--> 正在处理依赖关系 cri-tools >= 1.13.0,它被软件包 kubeadm-1.17.0-0.x86_64 需要
---> 软件包 kubectl.x86_64.0.1.17.0-0 将被 安装
---> 软件包 kubelet.x86_64.0.1.17.0-0 将被 安装
--> 正在处理依赖关系 socat,它被软件包 kubelet-1.17.0-0.x86_64 需要
--> 正在处理依赖关系 conntrack,它被软件包 kubelet-1.17.0-0.x86_64 需要
--> 正在检查事务
---> 软件包 conntrack-tools.x86_64.0.1.4.4-5.el7_7.2 将被 安装
--> 正在处理依赖关系 libnetfilter_cttimeout.so.1(LIBNETFILTER_CTTIMEOUT_1.1)(64bit),它被软件包 conntrack-tools-1.4.4-5.el7_7.2.x86_64 需要
--> 正在处理依赖关系 libnetfilter_cttimeout.so.1(LIBNETFILTER_CTTIMEOUT_1.0)(64bit),它被软件包 conntrack-tools-1.4.4-5.el7_7.2.x86_64 需要
--> 正在处理依赖关系 libnetfilter_cthelper.so.0(LIBNETFILTER_CTHELPER_1.0)(64bit),它被软件包 conntrack-tools-1.4.4-5.el7_7.2.x86_64 需要
--> 正在处理依赖关系 libnetfilter_queue.so.1()(64bit),它被软件包 conntrack-tools-1.4.4-5.el7_7.2.x86_64 需要
--> 正在处理依赖关系 libnetfilter_cttimeout.so.1()(64bit),它被软件包 conntrack-tools-1.4.4-5.el7_7.2.x86_64 需要
--> 正在处理依赖关系 libnetfilter_cthelper.so.0()(64bit),它被软件包 conntrack-tools-1.4.4-5.el7_7.2.x86_64 需要
---> 软件包 cri-tools.x86_64.0.1.13.0-0 将被 安装
---> 软件包 kubernetes-cni.x86_64.0.0.7.5-0 将被 安装
---> 软件包 socat.x86_64.0.1.7.3.2-2.el7 将被 安装
--> 正在检查事务
---> 软件包 libnetfilter_cthelper.x86_64.0.1.0.0-10.el7_7.1 将被 安装
---> 软件包 libnetfilter_cttimeout.x86_64.0.1.0.0-6.el7_7.1 将被 安装
---> 软件包 libnetfilter_queue.x86_64.0.1.0.2-2.el7_2 将被 安装
--> 解决依赖关系完成
依赖关系解决
====================================================================================================
Package 架构 版本 源 大小
====================================================================================================
正在安装:
kubeadm x86_64 1.17.0-0 kubernetes 8.7 M
kubectl x86_64 1.17.0-0 kubernetes 9.4 M
kubelet x86_64 1.17.0-0 kubernetes 20 M
为依赖而安装:
conntrack-tools x86_64 1.4.4-5.el7_7.2 updates 187 k
cri-tools x86_64 1.13.0-0 kubernetes 5.1 M
kubernetes-cni x86_64 0.7.5-0 kubernetes 10 M
libnetfilter_cthelper x86_64 1.0.0-10.el7_7.1 updates 18 k
libnetfilter_cttimeout x86_64 1.0.0-6.el7_7.1 updates 18 k
libnetfilter_queue x86_64 1.0.2-2.el7_2 base 23 k
socat x86_64 1.7.3.2-2.el7 base 290 k
事务概要
====================================================================================================
安装 3 软件包 (+7 依赖软件包)
总下载量:54 M
安装大小:243 M
Downloading packages:
(1/10): conntrack-tools-1.4.4-5.el7_7.2.x86_64.rpm | 187 kB 00:00:00
(2/10): 14bfe6e75a9efc8eca3f638eb22c7e2ce759c67f95b43b16fae4ebabde1549f3-cri | 5.1 MB 00:00:02
(3/10): bf67b612b185159556555b03e1e3a1ac5b10096afe48e4a7b7f5f9c4542238eb-kub | 9.4 MB 00:00:02
(4/10): 7d9e0a47eb6eaf5322bd45f05a2360a033c29845543a4e76821ba06becdca6fd-kub | 20 MB 00:00:04
(5/10): 548a0dcd865c16a50980420ddfa5fbccb8b59621179798e6dc905c9bf8af3b34-kub | 10 MB 00:00:01
(6/10): libnetfilter_queue-1.0.2-2.el7_2.x86_64.rpm | 23 kB 00:00:00
(7/10): libnetfilter_cthelper-1.0.0-10.el7_7.1.x86_64.rpm | 18 kB 00:00:00
(8/10): libnetfilter_cttimeout-1.0.0-6.el7_7.1.x86_64.rpm | 18 kB 00:00:00
(9/10): socat-1.7.3.2-2.el7.x86_64.rpm | 290 kB 00:00:00
(10/10): 2c6d2fa074d044b3c58ce931349e74c25427f173242c6a5624f0f789e329bc75-ku | 8.7 MB 00:00:14
----------------------------------------------------------------------------------------------------
总计 3.7 MB/s | 54 MB 00:00:14
Running transaction check
Running transaction test
Transaction test succeeded
Running transaction
正在安装 : libnetfilter_cttimeout-1.0.0-6.el7_7.1.x86_64 1/10
正在安装 : socat-1.7.3.2-2.el7.x86_64 2/10
正在安装 : cri-tools-1.13.0-0.x86_64 3/10
正在安装 : libnetfilter_queue-1.0.2-2.el7_2.x86_64 4/10
正在安装 : libnetfilter_cthelper-1.0.0-10.el7_7.1.x86_64 5/10
正在安装 : conntrack-tools-1.4.4-5.el7_7.2.x86_64 6/10
正在安装 : kubernetes-cni-0.7.5-0.x86_64 7/10
正在安装 : kubelet-1.17.0-0.x86_64 8/10
正在安装 : kubectl-1.17.0-0.x86_64 9/10
正在安装 : kubeadm-1.17.0-0.x86_64 10/10
验证中 : kubectl-1.17.0-0.x86_64 1/10
验证中 : libnetfilter_cthelper-1.0.0-10.el7_7.1.x86_64 2/10
验证中 : conntrack-tools-1.4.4-5.el7_7.2.x86_64 3/10
验证中 : libnetfilter_queue-1.0.2-2.el7_2.x86_64 4/10
验证中 : kubelet-1.17.0-0.x86_64 5/10
验证中 : cri-tools-1.13.0-0.x86_64 6/10
验证中 : kubernetes-cni-0.7.5-0.x86_64 7/10
验证中 : socat-1.7.3.2-2.el7.x86_64 8/10
验证中 : libnetfilter_cttimeout-1.0.0-6.el7_7.1.x86_64 9/10
验证中 : kubeadm-1.17.0-0.x86_64 10/10
已安装:
kubeadm.x86_64 0:1.17.0-0 kubectl.x86_64 0:1.17.0-0 kubelet.x86_64 0:1.17.0-0
作为依赖被安装:
conntrack-tools.x86_64 0:1.4.4-5.el7_7.2 cri-tools.x86_64 0:1.13.0-0
kubernetes-cni.x86_64 0:0.7.5-0 libnetfilter_cthelper.x86_64 0:1.0.0-10.el7_7.1
libnetfilter_cttimeout.x86_64 0:1.0.0-6.el7_7.1 libnetfilter_queue.x86_64 0:1.0.2-2.el7_2
socat.x86_64 0:1.7.3.2-2.el7
完毕!
[root@k8s-master ~]#
Then enable kubelet at boot, but do not start it manually (kubeadm will start it during init):
[root@k8s-master ~]# systemctl enable kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
[root@k8s-master ~]#
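Before initializing the cluster you can verify that all three nodes have the expected versions installed:
[root@k8s-master ~]# kubeadm version -o short
[root@k8s-master ~]# kubelet --version
[root@k8s-master ~]# kubectl version --client --short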
11. Initialize the cluster with kubeadm (note: the following steps are performed on the master):
[root@k8s-master ~]# kubeadm init \
> --apiserver-advertise-address=192.168.31.61 \
> --image-repository registry.aliyuncs.com/google_containers \
> --kubernetes-version v1.17.0 \
> --service-cidr=10.96.0.0/12 \
> --pod-network-cidr=10.244.0.0/16
W0416 13:58:38.475124 12941 validation.go:28] Cannot validate kube-proxy config - no validator is available
W0416 13:58:38.475243 12941 validation.go:28] Cannot validate kubelet config - no validator is available
[init] Using Kubernetes version: v1.17.0
[preflight] Running pre-flight checks
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.31.61]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master localhost] and IPs [192.168.31.61 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master localhost] and IPs [192.168.31.61 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
W0416 14:00:47.504439 12941 manifests.go:214] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[control-plane] Creating static Pod manifest for "kube-scheduler"
W0416 14:00:47.505410 12941 manifests.go:214] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[kubelet-check] Initial timeout of 40s passed.
[apiclient] All control plane components are healthy after 99.180880 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.17" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8s-master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: fieyef.404f7pggn52d3fxv
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.31.61:6443 --token fieyef.404f7pggn52d3fxv \
--discovery-token-ca-cert-hash sha256:ea14cbb3cda2026ed72999cc2ccef82790d2096436ad7d50e35e98ac4570dcc2
[root@k8s-master ~]#
Then follow the instructions printed by kubeadm:
[root@k8s-master ~]# mkdir -p $HOME/.kube
[root@k8s-master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@k8s-master ~]#
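If you later lose the join command, or the bootstrap token expires (tokens are valid for 24 hours by default), a new one can be generated on the master at any time:
[root@k8s-master ~]# kubeadm token create --print-join-command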
12. Install the flannel network add-on. First download the kube-flannel.yml file and modify it as shown below:
If the download of kube-flannel.yml does not connect, retry a few times:
[root@k8s-master ~]# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
--2020-04-16 14:04:30-- https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
正在解析主机 raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.108.133
正在连接 raw.githubusercontent.com (raw.githubusercontent.com)|151.101.108.133|:443... 已连接。
已发出 HTTP 请求,正在等待回应... 200 OK
长度:14416 (14K) [text/plain]
正在保存至: “kube-flannel.yml”
100%[==========================================================>] 14,416 --.-K/s 用时 0.1s
2020-04-16 14:04:31 (126 KB/s) - 已保存 “kube-flannel.yml” [14416/14416])
[root@k8s-master ~]# vim kube-flannel.yml
Replace both occurrences of the flannel image (one in the initContainers section, one in the containers section):
image: quay.io/coreos/flannel:v0.12.0-amd64
with
image: hanshuaiping/flannel:v0.11.0-amd64
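Instead of editing the file in vim, the same replacement can be done with a single sed command (assuming the upstream manifest references v0.12.0-amd64 as described above):
[root@k8s-master ~]# sed -i 's#quay.io/coreos/flannel:v0.12.0-amd64#hanshuaiping/flannel:v0.11.0-amd64#g' kube-flannel.yml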
[root@k8s-master ~]# cat kube-flannel.yml
(parts omitted)
hostNetwork: true
tolerations:
- operator: Exists
effect: NoSchedule
serviceAccountName: flannel
initContainers:
- name: install-cni
image: hanshuaiping/flannel:v0.11.0-amd64
command:
- cp
args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
containers:
- name: kube-flannel
image: hanshuaiping/flannel:v0.11.0-amd64
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
resources:
requests:
cpu: "100m"
memory: "50Mi"
[root@k8s-master ~]#
Apply the manifest:
[root@k8s-master ~]# kubectl apply -f kube-flannel.yml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds-amd64 created
daemonset.apps/kube-flannel-ds-arm64 created
daemonset.apps/kube-flannel-ds-arm created
daemonset.apps/kube-flannel-ds-ppc64le created
daemonset.apps/kube-flannel-ds-s390x created
[root@k8s-master ~]#
Check the pods (in particular whether the kube-flannel-ds-amd64 pod reaches the READY state; it needs to pull an image, so re-check a few times if it is not READY yet):
[root@k8s-master ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-9d85f5447-fxfk7 0/1 Pending 0 2m47s
coredns-9d85f5447-zsvv5 0/1 Pending 0 2m47s
etcd-k8s-master 1/1 Running 0 3m4s
kube-apiserver-k8s-master 1/1 Running 0 3m4s
kube-controller-manager-k8s-master 1/1 Running 0 3m3s
kube-flannel-ds-amd64-sftbm 0/1 Init:0/1 0 23s
kube-proxy-nr7m4 1/1 Running 0 2m47s
kube-scheduler-k8s-master 1/1 Running 0 3m4s
[root@k8s-master ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-9d85f5447-fxfk7 0/1 Pending 0 2m48s
coredns-9d85f5447-zsvv5 0/1 Pending 0 2m48s
etcd-k8s-master 1/1 Running 0 3m5s
kube-apiserver-k8s-master 1/1 Running 0 3m5s
kube-controller-manager-k8s-master 1/1 Running 0 3m4s
kube-flannel-ds-amd64-sftbm 1/1 Running 0 24s
kube-proxy-nr7m4 1/1 Running 0 2m48s
kube-scheduler-k8s-master 1/1 Running 0 3m5s
Check whether the master node is Ready:
[root@k8s-master ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master Ready master 8m19s v1.17.0
[root@k8s-master ~]#
On the two worker nodes, run the join command that step 11 printed under "Then you can join any number of worker nodes by running the following on each as root:":
node1:
[root@k8s-node1 ~]# kubeadm join 192.168.31.61:6443 --token fieyef.404f7pggn52d3fxv \
> --discovery-token-ca-cert-hash sha256:ea14cbb3cda2026ed72999cc2ccef82790d2096436ad7d50e35e98ac4570dcc2
W0416 14:13:58.827579 3743 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.17" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
[root@k8s-node1 ~]#
node2:
[root@k8s-node2 ~]# kubeadm join 192.168.31.61:6443 --token fieyef.404f7pggn52d3fxv \
> --discovery-token-ca-cert-hash sha256:ea14cbb3cda2026ed72999cc2ccef82790d2096436ad7d50e35e98ac4570dcc2
W0416 14:14:02.444693 3797 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.17" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
[root@k8s-node2 ~]#
After both joins, run kubectl get node on the master to check. Because images still have to be pulled, re-check a few times if the nodes are not yet Ready:
[root@k8s-master ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-9d85f5447-fxfk7 1/1 Running 0 6m7s
coredns-9d85f5447-zsvv5 1/1 Running 0 6m7s
etcd-k8s-master 1/1 Running 0 6m24s
kube-apiserver-k8s-master 1/1 Running 0 6m24s
kube-controller-manager-k8s-master 1/1 Running 0 6m23s
kube-flannel-ds-amd64-45qqs 1/1 Running 0 64s
kube-flannel-ds-amd64-4bb6b 0/1 Init:0/1 0 56s
kube-flannel-ds-amd64-sftbm 1/1 Running 0 3m43s
kube-proxy-2s5n2 1/1 Running 0 64s
kube-proxy-nr7m4 1/1 Running 0 6m7s
kube-proxy-wkglz 1/1 Running 0 56s
kube-scheduler-k8s-master 1/1 Running 0 6m24s
[root@k8s-master ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master Ready master 6m39s v1.17.0
k8s-node1 NotReady <none> 70s v1.17.0
k8s-node2 NotReady <none> 62s v1.17.0
[root@k8s-master ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-9d85f5447-fxfk7 1/1 Running 0 6m22s
coredns-9d85f5447-zsvv5 1/1 Running 0 6m22s
etcd-k8s-master 1/1 Running 0 6m39s
kube-apiserver-k8s-master 1/1 Running 0 6m39s
kube-controller-manager-k8s-master 1/1 Running 0 6m38s
kube-flannel-ds-amd64-45qqs 1/1 Running 0 79s
kube-flannel-ds-amd64-4bb6b 1/1 Running 0 71s
kube-flannel-ds-amd64-sftbm 1/1 Running 0 3m58s
kube-proxy-2s5n2 1/1 Running 0 79s
kube-proxy-nr7m4 1/1 Running 0 6m22s
kube-proxy-wkglz 1/1 Running 0 71s
kube-scheduler-k8s-master 1/1 Running 0 6m39s
[root@k8s-master ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master Ready master 6m50s v1.17.0
k8s-node1 Ready <none> 81s v1.17.0
k8s-node2 Ready <none> 73s v1.17.0
[root@k8s-master ~]#
13. Install the Dashboard (run on the master):
[root@k8s-master ~]# kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta8/aio/deploy/recommended.yaml
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created
[root@k8s-master ~]#
Check the Dashboard pods; if they are not yet Running/READY, re-check a few times:
[root@k8s-master ~]# kubectl get pods -n kubernetes-dashboard
NAME READY STATUS RESTARTS AGE
dashboard-metrics-scraper-76585494d8-vsdgm 1/1 Running 0 96s
kubernetes-dashboard-5996555fd8-g898v 1/1 Running 0 96s
[root@k8s-master ~]#
Download the recommended.yaml file and modify the Service spec as follows (expose the Dashboard as a NodePort):
[root@k8s-master ~]# wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta8/aio/deploy/recommended.yaml
--2020-04-16 15:00:49-- https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta8/aio/deploy/recommended.yaml
正在解析主机 raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.108.133
正在连接 raw.githubusercontent.com (raw.githubusercontent.com)|151.101.108.133|:443... 已连接。
已发出 HTTP 请求,正在等待回应... 200 OK
长度:7568 (7.4K) [text/plain]
正在保存至: “recommended.yaml”
100%[=========================================================>] 7,568 --.-K/s 用时 0.004s
2020-04-16 15:00:49 (1.67 MB/s) - 已保存 “recommended.yaml” [7568/7568])
View the file contents; only the modified part is shown below, the rest is omitted:
[root@k8s-master ~]# cat recommended.yaml
spec:
type: NodePort
ports:
- port: 443
targetPort: 8443
nodePort: 30001
selector:
k8s-app: kubernetes-dashboard
---
Apply the modified file:
[root@k8s-master ~]# kubectl apply -f recommended.yaml
namespace/kubernetes-dashboard unchanged
serviceaccount/kubernetes-dashboard unchanged
service/kubernetes-dashboard configured
secret/kubernetes-dashboard-certs unchanged
secret/kubernetes-dashboard-csrf configured
Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
secret/kubernetes-dashboard-key-holder configured
configmap/kubernetes-dashboard-settings unchanged
role.rbac.authorization.k8s.io/kubernetes-dashboard unchanged
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard unchanged
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard unchanged
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard unchanged
deployment.apps/kubernetes-dashboard unchanged
service/dashboard-metrics-scraper unchanged
deployment.apps/dashboard-metrics-scraper unchanged
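You can confirm that the Service is now exposed on the NodePort; the kubernetes-dashboard Service should show TYPE NodePort with PORT(S) 443:30001/TCP:
[root@k8s-master ~]# kubectl get svc -n kubernetes-dashboard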
14. Access the Kubernetes Dashboard web UI:
In a browser, open the node1 IP on the NodePort configured above (note the https:// prefix), e.g. https://192.168.31.62:30001
Create a login token:
[root@k8s-master ~]# kubectl create serviceaccount dashboard-admin -n kube-system
serviceaccount/dashboard-admin created
[root@k8s-master ~]# kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin
clusterrolebinding.rbac.authorization.k8s.io/dashboard-admin created
[root@k8s-master ~]# kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')
Name: attachdetach-controller-token-rdgwj
Namespace: kube-system
Labels: <none>
Annotations: kubernetes.io/service-account.name: attachdetach-controller
kubernetes.io/service-account.uid: 73f61dbc-997b-4047-8272-c1609f45d1a0
Type: kubernetes.io/service-account-token
Data
====
ca.crt: 1025 bytes
namespace: 11 bytes
token: eyJhbGciOiJSUzI1NiIsImtpZCI6IkM5a3ZkelJBVjVEOGdCbThkaFlkR3hIVEZrNmdxbkpkb3BvTTA1RmgyYncifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhdHRhY2hkZXRhY2gtY29udHJvbGxlci10b2tlbi1yZGd3aiIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJhdHRhY2hkZXRhY2gtY29udHJvbGxlciIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjczZjYxZGJjLTk5N2ItNDA0Ny04MjcyLWMxNjA5ZjQ1ZDFhMCIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlLXN5c3RlbTphdHRhY2hkZXRhY2gtY29udHJvbGxlciJ9.3_UuE4MOm41Bxkd5qoEJ2-xrxkyzE7Ja2telFS4ABJTfUpy8OVvrAFp-Whcarf2RpqTVPLcJTcfvUFQi3rioOu2NPNhHYhocR4nAbxKej0XkuNEcOPe3I_5hblLQIgeNTIE4wZiIO15kNENxxU5m5DPX3C4vAz2cCtnNQpfj8u3k-fXviaqGcaOjzhoebq1gv4N2ho15uK-Rcaf8ZDoVzso5bjYjnAUAv2NxWZ9vZJNUUV_KSjjeGFeiJUAVYfGFrKfDJgMWhDaX5bDKL9VjVQ9lF21VbH6DIcRGNcHjkaTOPmqkAWxXLBQjA9oF6EQaqDVTv9eqtiKVlpgK2sBupw
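If the describe output shows a different secret first (as above, where the attachdetach-controller token is listed), you can print just the dashboard-admin token directly with a jsonpath query (a sketch; the .secrets field assumes Kubernetes 1.17 style service-account token secrets):
[root@k8s-master ~]# kubectl -n kube-system get secret $(kubectl -n kube-system get sa dashboard-admin -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 -d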
In the browser, click Advanced, accept the risk and continue, then log in with the token generated above.
At this point, the Kubernetes installation is complete.
If you have any questions, contact me on WeChat (note "kubernetes" in the request) so we can study and discuss together; corrections to this article are always welcome.