环境说明
操作系统: centos7
主机规划:
主机名 | ip |
---|---|
k8s-master-1 | 10.0.0.10 |
k8s-node-1 | 10.0.0.21 |
k8s-node-2 | 10.0.0.22 |
系统初始化
设置DNS文件
# Set the hostname on each machine (run only the matching line on its own host).
hostnamectl set-hostname k8s-master-1
hostnamectl set-hostname k8s-node-1
hostnamectl set-hostname k8s-node-2
# Append name resolution for all cluster nodes (run on every host).
{
  printf '%s\n' '10.0.0.10 k8s-master-1'
  printf '%s\n' '10.0.0.21 k8s-node-1'
  printf '%s\n' '10.0.0.22 k8s-node-2'
} >> /etc/hosts
1、安装依赖包
# Back up the stock repo file before replacing it.
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo_bak
# Fetch the Aliyun mirror repo definition.
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
# Rebuild the yum metadata cache against the new mirror.
yum clean all
yum makecache
# Base tooling required by the rest of this guide.
yum install -y \
  conntrack ntpdate ntp ipvsadm ipset jq iptables curl \
  sysstat libseccomp wget vim net-tools git
2、设置防火墙为iptables
# Stop and disable firewalld; the cluster manages iptables rules directly.
systemctl stop firewalld
systemctl disable firewalld
# Install iptables-services, start/enable it, flush rules and persist them.
yum install -y iptables-services
systemctl start iptables
systemctl enable iptables
iptables -F
service iptables save
3、关闭selinux和swapoff分区
# Turn swap off now and comment out swap entries in fstab (kubelet refuses swap).
swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
# Put SELinux in permissive mode now and disable it permanently.
setenforce 0
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
5、 调整内核参数,对于 K8S
# Kernel parameters required by Kubernetes.
# Fix: sysctl(8) does not strip trailing comments, so lines like
# "vm.swappiness=0 # ..." make `sysctl -p` error out — comments must be on
# their own lines in the generated file.
modprobe br_netfilter
cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
# 禁止使用 swap 空间,只有当系统 OOM 时才允许使用它
vm.swappiness=0
# 不检查物理内存是否够用
vm.overcommit_memory=1
# 开启 OOM
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
cp kubernetes.conf /etc/sysctl.d/kubernetes.conf
sysctl -p /etc/sysctl.d/kubernetes.conf
6、调整系统时区
# Set the system timezone to Asia/Shanghai.
timedatectl set-timezone Asia/Shanghai
# Keep the hardware clock in UTC (0 = RTC is not in local time).
timedatectl set-local-rtc 0
# Restart services that depend on the system time.
systemctl restart rsyslog
systemctl restart crond
7、关闭没用的服务
# Postfix is not needed on cluster nodes: stop it and keep it off at boot.
systemctl stop postfix
systemctl disable postfix
8、设置 rsyslogd 和systemd journald
# Configure persistent, size-bounded systemd-journald logging.
# Fix: use `mkdir -p` so the script does not abort when the directories
# already exist (/var/log/journal is pre-created on many systems).
mkdir -p /var/log/journal # 持久化保存日志的目录
mkdir -p /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# 持久化保存到磁盘
Storage=persistent
# 压缩历史日志
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# 最大占用空间 10G
SystemMaxUse=10G
# 单日志文件最大 200M
SystemMaxFileSize=200M
# 日志保存时间 2 周
MaxRetentionSec=2week
# 不将日志转发到 syslog
ForwardToSyslog=no
EOF
systemctl restart systemd-journald
9、升级内核到4.4
# Install the ELRepo release package (provides the elrepo-kernel repo).
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
# After installation, check that the kernel menuentry in /boot/grub2/grub.cfg
# contains an initrd16 line; if it does not, install the kernel once more.
# (Fix: this note was originally line-wrapped so its tail landed on a bare
# non-comment line and would have been executed as a command.)
# If the download is slow, route it through a proxy, e.g.:
#   export all_proxy=socks5://192.168.2.102:1086
yum --enablerepo=elrepo-kernel install -y kernel-lt
# Boot from the new kernel by default.
grub2-set-default 'CentOS Linux (4.4.189-1.el7.elrepo.x86_64) 7 (Core)'
reboot
安装k8s
1、kube-proxy开启ipvs的前置条件
# Prerequisites for kube-proxy IPVS mode: load the required kernel modules
# now and register a script so they are reloaded at boot.
modprobe br_netfilter
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
#如果内核升级成5以后的版本,修改成 modprobe -- nf_conntrack
modprobe -- nf_conntrack_ipv4
EOF
# Make the script executable, run it once now, then verify the modules loaded.
# NOTE(review): on kernels >= 4.19 the module is nf_conntrack, not
# nf_conntrack_ipv4 (see the comment inside the generated script).
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
2、安装docker
# Install Docker CE from the Aliyun mirror.
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager \
--add-repo \
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce
## Create /etc/docker
# Fix: -p keeps this from failing when the package already created the dir.
mkdir -p /etc/docker
# Daemon config: systemd cgroup driver (what kubeadm expects), json-file
# logging capped at 100m per file, and an Aliyun registry mirror.
cat > /etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"registry-mirrors": ["https://tosnxdv7.mirror.aliyuncs.com"],
"log-opts": {
"max-size": "100m"
}
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
# Reload units, restart docker, and enable it at boot.
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
# Check whether the reboot fell back to the stock 3.10 kernel.
uname -a
# If it is still 3.10.x, re-select the new kernel entry and reboot again.
grub2-set-default 'CentOS Linux (4.4.189-1.el7.elrepo.x86_64) 7 (Core)'
reboot
3、所有节点安装 Kubeadm
# Add the Aliyun Kubernetes yum repo on every node.
# Fix: the second gpgkey URL must be indented so yum's INI parser treats it
# as a continuation of the gpgkey option rather than a malformed line.
cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
# Pin kubeadm/kubectl/kubelet to 1.15.1 to match the cluster version.
yum -y install kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1
# Enable kubelet at boot; kubeadm will configure and start it.
systemctl enable kubelet.service
4、初始化k8s-master-1 主节点
# 生成kubeadm-config配置文件
kubeadm config print init-defaults > kubeadm-config.yaml
vim kubeadm-config.yaml
#然后打开这个文件,修改几个参数
imageRepository: registry.aliyuncs.com/google_containers #修改下镜像地址,默认的 k8s.gcr.io 国内不能下载
kubernetesVersion: v1.15.1 # kubeadm 版本
localAPIEndpoint:
  advertiseAddress: 10.0.0.10 # master 的 ip
networking:
  podSubnet: "10.244.0.0/16" # flannel 的默认网段,必须是这个 ip 段
  serviceSubnet: 10.96.0.0/12
# 然后在末尾添加如下内容,支持ipvs
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
SupportIPVSProxyMode: true
mode: ipvs
# Initialize the control plane from the config file; keep the full output so
# the worker "kubeadm join" command can be retrieved from the log later.
kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log
# Confirm initialization succeeded.
grep "initialized successfully" kubeadm-init.log
# Post-init: give the current user a kubeconfig so kubectl works.
# Fix: -p and quoted expansions so re-runs and odd $HOME values are safe.
mkdir -p "$HOME/.kube"
sudo cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"
# Archive the init config and log for later reference.
mkdir -p install-k8s
mv kubeadm-init.log kubeadm-config.yaml install-k8s
# The upstream flannel manifest is blocked by the GFW; a mirror copy is used.
#wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
wget https://linux-soft-ware.oss-cn-shenzhen.aliyuncs.com/kube-flannel.yml
kubectl apply -f kube-flannel.yml
#稍微等一下后,查看集群节点状态,如果出现下面输出,证明master节点部署成功
[root@k8s-master-1 install-k8s]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master-1 Ready master 59m v1.15.1
5、从节点初始化 k8s-node-x
# 在kubeadm-init.log 最后的日志,找到让从节点加入到主节点的命令
[root@k8s-master-1 install-k8s]# tail -2 kubeadm-init.log
kubeadm join 10.0.0.10:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:3c3fc136a696cfecb02f9f24e4ce906aa611151674b17d5cd337e4be640b6e74
#在从节点执行上面输出的命令
[root@k8s-node-1 ~]# kubeadm join 10.0.0.10:6443 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:3c3fc136a696cfecb02f9f24e4ce906aa611151674b17d5cd337e4be640b6e74
[root@k8s-node-2 ~]# kubeadm join 10.0.0.10:6443 --token abcdef.0123456789abcdef \
> --discovery-token-ca-cert-hash sha256:3c3fc136a696cfecb02f9f24e4ce906aa611151674b17d5cd337e4be640b6e74
>
#查看k8s集群节点信息,出现下面的输出, 证明没问题
[root@k8s-master-1 install-k8s]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-master-1 Ready master 65m v1.15.1
k8s-node-1 Ready <none> 26m v1.15.1
k8s-node-2 Ready <none> 26m v1.15.1
安装Harbor
Harbor是企业级的私有仓库,拥有交互体验较好的WEB UI界面 , 可以把私有的镜像推送到Harbor上进行管理
安装docker
# Install Docker CE on the Harbor host (same setup as the k8s nodes).
yum install -y yum-utils
yum-config-manager \
--add-repo \
http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum update -y && yum install -y docker-ce
## Create /etc/docker
# Fix: -p keeps this from failing when the directory already exists.
mkdir -p /etc/docker
# Daemon config: systemd cgroup driver, capped json-file logs, Aliyun mirror.
cat > /etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"registry-mirrors": ["https://tosnxdv7.mirror.aliyuncs.com"],
"log-opts": {
"max-size": "100m"
}
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
# Reload units, restart docker, and enable it at boot.
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
安装 docker-compose
Compose 是用于定义和运行多容器 Docker 应用程序的工具。通过 Compose,您可以使用 YML 文件来配置应用程序需要的所有服务。然后,使用一个命令,就可以从 YML 文件配置中创建并启动所有服务。
Compose 使用的三个步骤:
使用 Dockerfile 定义应用程序的环境。
使用 docker-compose.yml 定义构成应用程序的服务,这样它们可以在隔离环境中一起运行。
最后,执行 docker-compose up 命令来启动并运行整个应用程序。
# Fetch the docker-compose binary from the OSS mirror.
wget https://linux-soft-ware.oss-cn-shenzhen.aliyuncs.com/docker-compose
# Put it on PATH and mark it executable.
mv docker-compose /usr/local/bin
chmod +x /usr/local/bin/docker-compose
下载Harbor安装包
#这个包就不提供出去了, 包太大了, 消息的OSS流量没多少了, 需要节省点
wget https://xxxxx/harbor-offline-installer-v1.2.0.tgz
tar xf harbor-offline-installer-v1.2.0.tgz
#修改安装配置
vim harbor/harbor.cfg
#设置域名
hostname = hub.mabu.com
#设置http是否加密,我们选择加密
ui_url_protocol = https
创建证书
# Create a working directory for the self-signed certificate material.
mkdir /data/cert -p
cd /data/cert
#创建秘钥
[root@harbor cert]# openssl genrsa -des3 -out server.key 2048
Generating RSA private key, 2048 bit long modulus
............................................................+++
..............................................+++
e is 65537 (0x10001)
Enter pass phrase for server.key: 输入密码 123456
Verifying - Enter pass phrase for server.key: 输入密码 123456
#创建证书
[root@harbor cert]# openssl req -new -key server.key -out server.csr
Enter pass phrase for server.key: 输入密码123456
You are about to be asked to enter information that will be incorporated
into your certificate request.
What you are about to enter is what is called a Distinguished Name or a DN.
There are quite a few fields but you can leave some blank
For some fields there will be a default value,
If you enter '.', the field will be left blank.
-----
Country Name (2 letter code) [XX]:CN
State or Province Name (full name) []:GD
Locality Name (eg, city) [Default City]:SZ
Organization Name (eg, company) [Default Company Ltd]:ITMABU
Organizational Unit Name (eg, section) []:ITMABU
Common Name (eg, your name or your server's hostname) []:hub.mabu.com
Email Address []:itmabu@163.com
Please enter the following 'extra' attributes
to be sent with your certificate request
A challenge password []: 回车
An optional company name []: 回车
#让证书褪去密码
[root@harbor cert]# openssl rsa -in server.key -out server.key
Enter pass phrase for server.key:输入密码 123456
writing RSA key
#签名
[root@harbor cert]# openssl x509 -req -days 365 -in server.csr -signkey server.key -out server.crt
Signature ok
subject=/C=CN/ST=GD/L=SZ/O=ITMABU/OU=ITMABU/CN=hub.mabu.com/emailAddress=itmabu@163.com
Getting Private key
# Fix: the original `chmod -R 777` made the private key world-writable and
# world-readable. Use the minimum permissions instead: traversable dir,
# world-readable certificate, owner-only private key (root can still read it).
chmod 755 /data/cert
chmod 644 /data/cert/server.crt
chmod 600 /data/cert/server.key
开始安装Harbor
cd ~/harbor
./install.sh
#windows本地的hosts文件追加
10.0.0.11 hub.mabu.com
尝试访问harbor
账号/密码:admin/Harbor12345
修改所有docker节点的配置
echo "10.0.0.11 hub.mabu.com" >> /etc/hosts
vim /etc/docker/daemon.json
#注意:JSON 文件里不能写 # 注释,否则 dockerd 解析 daemon.json 会失败。
#在已有配置对象中追加下面这一行(注意与上一行之间的逗号),
#"insecure-registries" 表示可信任的仓库地址,因为证书是自签的,需要添加这行配置
{
"insecure-registries":["https://hub.mabu.com"]
}
#重启docker
systemctl restart docker
#登录harbor,会要求输入账号密码
docker login https://hub.mabu.com
测试Harbor镜像推送
# Pull a stock image to test the registry with.
docker pull nginx
# Re-tag it into the Harbor project path.
docker tag nginx hub.mabu.com/library/mabu-nginx:v1
# Push it to Harbor.
docker push hub.mabu.com/library/mabu-nginx:v1
检查harbor是否有新镜像
使用kubectl创建应用资源
主流是按照资源清单配置文件的形式去创建应用资源,为了演示功能,使用命令行的形式去创建
# Create an application (Deployment with 1 replica) from the Harbor image.
# NOTE(review): on kubectl 1.15 `kubectl run --replicas` creates a Deployment;
# newer kubectl removed this — use `kubectl create deployment` there instead.
kubectl run nginx-deployment-group --image=hub.mabu.com/library/mabu-nginx:v1 --replicas=1 --port=80
#查看创建的资源信息
[root@k8s-master-1 ~]# kubectl get deployment
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-deployment-group 1/1 1 1 11h
#查看创建的资源信息
[root@k8s-master-1 ~]# kubectl get rs
NAME DESIRED CURRENT READY AGE
nginx-deployment-group-5c856d4865 1 1 1 11h
[root@k8s-master-1 ~]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-deployment-group-5c856d4865-4475f 1/1 Running 0 11h 10.244.2.3 k8s-node-1 <none> <none>
#尝试访问资源,能访问成功,则没问题
[root@k8s-master-1 ~]# curl 10.244.2.3
#测试删除pod,k8s会不会在其他节点恢复nginx
[root@k8s-master-1 ~]# kubectl delete pod nginx-deployment-group-5c856d4865-4475f
pod "nginx-deployment-group-5c856d4865-4475f" deleted
#可以看到老的pod被移除,新的pod新建在k8s-node-2节点上
[root@k8s-master-1 ~]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-deployment-group-5c856d4865-6jpcl 1/1 Running 0 24s 10.244.1.5 k8s-node-2 <none> <none>
#修改副本数
[root@k8s-master-1 ~]# kubectl scale --replicas=3 deployment/nginx-deployment-group
deployment.extensions/nginx-deployment-group scaled
#查看pod的最新信息,可以看到有3个pod在运行,修改成功
[root@k8s-master-1 ~]# kubectl get pod -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-deployment-group-5c856d4865-6jpcl 1/1 Running 0 2m8s 10.244.1.5 k8s-node-2 <none> <none>
nginx-deployment-group-5c856d4865-8ttjf 1/1 Running 0 12h 10.244.2.2 k8s-node-1 <none> <none>
nginx-deployment-group-5c856d4865-fjv4m 1/1 Running 0 11h 10.244.1.4 k8s-node-2 <none> <none>
#但是出现一个问题,如何把这些nginx暴露一个不变的ip地址,供外部访问
#创建负载均衡svc服务,会暴露一个虚拟ip,这个虚拟ip会轮询3个nginx服务
[root@k8s-master-1 ~]# kubectl expose deployment nginx-deployment-group --port=8000 --target-port=80
#查看svc的信息,可以看到暴露的虚拟ip是10.107.9.117 ,端口为8000
[root@k8s-master-1 ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 3h28m
nginx-deployment-group ClusterIP 10.107.9.117 <none> 8000/TCP 3m39s
#使用虚拟ip尝试访问
curl 10.107.9.117:8000
#实际上SVC底层是使用了LVS进行的负载均衡,我们可以查看LVS的配置
[root@k8s-master-1 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
...省略其他输出
TCP 10.107.9.117:8000 rr
-> 10.244.1.4:80 Masq 1 0 1
-> 10.244.1.5:80 Masq 1 0 2
-> 10.244.2.2:80 Masq 1 0 1
#我们可以看到,10.107.9.117 虚拟ip会去轮询上面的3个pod的ip地址的80端口
#但是这是内部ip的地址, 外部无法访问,需要宿主机暴露的端口才能访问
#修改svc的信息
[root@k8s-master-1 ~]# kubectl edit svc nginx-deployment-group
type: NodePort
#再次查看svc的信息, 可以看到绑定到宿主机的31249端口
[root@k8s-master-1 ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 3h32m
nginx-deployment-group NodePort 10.107.9.117 <none> 8000:31249/TCP 7m53s
#测试访问,所有k8s节点的31249端口都可以访问
curl 10.0.0.10:31249
curl 10.0.0.21:31249
curl 10.0.0.22:31249