Kubernetes (2): Cluster Installation

1. Environment Topology

(Topology diagram: master 192.168.182.100, worker1 192.168.182.101, worker2 192.168.182.102, plus a Harbor registry host at 192.168.182.150.)

2. Cluster Environment Initialization

Set the system hostname on each node and add mutual name resolution to the hosts file

[root@master ~]# vim /etc/hosts
192.168.182.100   master
192.168.182.101   worker1
192.168.182.102   worker2
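
The heading above also calls for setting each node's hostname; a minimal sketch, with the worker names taken from the topology (run the matching command on each node):

[root@master ~]# hostnamectl set-hostname master
[root@worker1 ~]# hostnamectl set-hostname worker1
[root@worker2 ~]# hostnamectl set-hostname worker2
# Copy the hosts file to the workers so all three nodes resolve each other
[root@master ~]# for h in worker1 worker2; do scp /etc/hosts $h:/etc/hosts; done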

Install dependency packages

[root@master ~]# yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget  vim net-tools git

Switch the firewall from firewalld to iptables and flush the rules

[root@master ~]# systemctl stop firewalld && systemctl disable firewalld

[root@master ~]# yum -y install iptables-services  && systemctl  start iptables  &&  systemctl  enable iptables && iptables -F  &&  service iptables save

Disable swap and SELinux

[root@master ~]# swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

[root@master ~]# setenforce 0 && sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
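
A quick sanity check that both changes took effect (a sketch; exact output formatting varies by CentOS build):

[root@master ~]# free -h | grep -i swap    # swap totals should read 0B
[root@master ~]# getenforce                # Permissive now; Disabled after the next reboot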

Tune kernel parameters for Kubernetes

[root@master ~]# cat > kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
# Forbid swap usage; only fall back to it when the system hits OOM
vm.swappiness=0
# Do not check whether enough physical memory is available before committing
vm.overcommit_memory=1
# Do not panic on OOM; let the OOM killer run instead
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF

[root@master ~]# cp kubernetes.conf  /etc/sysctl.d/kubernetes.conf

[root@master ~]# sysctl -p /etc/sysctl.d/kubernetes.conf

Adjust the system time zone

# Set the system time zone to Asia/Shanghai
[root@master ~]# timedatectl set-timezone Asia/Shanghai
# Keep the hardware clock in UTC
[root@master ~]# timedatectl set-local-rtc 0
# Restart services that depend on the system time
[root@master ~]# systemctl restart rsyslog

[root@master ~]# systemctl restart crond

Stop services the cluster does not need

[root@master ~]# systemctl stop postfix && systemctl disable postfix

Configure rsyslogd and systemd-journald

# Directory for persistent log storage
[root@master ~]# mkdir /var/log/journal 

[root@master ~]# mkdir /etc/systemd/journald.conf.d

[root@master ~]# cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# Persist logs to disk
Storage=persistent

# Compress historical logs
Compress=yes

SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000

# Cap total disk usage at 10G
SystemMaxUse=10G

# Cap each journal file at 200M
SystemMaxFileSize=200M

# Keep logs for 2 weeks
MaxRetentionSec=2week

# Do not forward logs to syslog
ForwardToSyslog=no
EOF

[root@master ~]# systemctl restart systemd-journald

Upgrade the system kernel to 4.4

The stock 3.10.x kernel that ships with CentOS 7.x has known bugs that make Docker and Kubernetes unstable.

[root@master ~]# rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm

# After installing, check that the new kernel's menuentry in /boot/grub2/grub.cfg contains an initrd16 line; if it does not, install again!
[root@master ~]# yum --enablerepo=elrepo-kernel install -y kernel-lt
# Boot from the new kernel by default
[root@master ~]# grub2-set-default 'CentOS Linux (4.4.189-1.el7.elrepo.x86_64) 7 (Core)' && reboot
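
If the menuentry title on your machine differs from the one above, a sketch that selects the entry by index instead of by name (entry 0 is normally the newest installed kernel; /etc/grub2.cfg is the BIOS-boot symlink to /boot/grub2/grub.cfg):

# List boot entries with their indexes
[root@master ~]# awk -F\' '/^menuentry/ {print i++ " : " $2}' /etc/grub2.cfg
# Set the default by index, reboot, then verify
[root@master ~]# grub2-set-default 0 && reboot
[root@master ~]# uname -r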

3. Installing the Cluster with Kubeadm

Prerequisites for enabling IPVS mode in kube-proxy

[root@master ~]# modprobe br_netfilter

[root@master ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

[root@master ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
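
Optionally, the same module list can be loaded automatically at every boot via systemd-modules-load (a sketch; note that on kernels 4.19 and newer nf_conntrack_ipv4 has been merged into nf_conntrack):

[root@master ~]# cat > /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF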

Install Docker

[root@master ~]# yum install -y yum-utils device-mapper-persistent-data lvm2

[root@master ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

[root@master ~]# yum update -y && yum install -y docker-ce
# After the update, set the 4.4 kernel as the default boot entry again
[root@master ~]# grub2-set-default 'CentOS Linux (4.4.189-1.el7.elrepo.x86_64) 7 (Core)' && reboot

[root@master ~]# mkdir /etc/docker

[root@master ~]# cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  }
}
EOF

[root@master ~]# mkdir -p /etc/systemd/system/docker.service.d

[root@master ~]# systemctl daemon-reload && systemctl restart docker && systemctl enable docker
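
After the restart, confirm that Docker actually picked up the systemd cgroup driver, since it must match the kubelet's:

[root@master ~]# docker info 2>/dev/null | grep -i 'cgroup driver'
# should print: Cgroup Driver: systemd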

Install Kubeadm (master and worker nodes)

[root@master ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

[root@master ~]# yum -y  install  kubeadm-1.15.1 kubectl-1.15.1 kubelet-1.15.1

[root@master ~]# systemctl enable kubelet.service

Import the offline images (pulling them online requires a connection that can reach Google's registries; all three hosts need the images)

[root@master ~]# wget https://linuxli.oss-cn-beijing.aliyuncs.com/Kubernetes/%E4%B8%8B%E8%BD%BD%E6%96%87%E4%BB%B6/kubeadm-basic.images.tar.gz

[root@master ~]# tar -xvf kubeadm-basic.images.tar.gz

[root@master ~]# vim image-load.sh

#!/bin/bash
# Load every image tarball from the offline bundle into the local Docker daemon

ls /root/kubeadm-basic.images/ > /tmp/image-list.txt

cd /root/kubeadm-basic.images/

for i in $(cat /tmp/image-list.txt)
do
	docker load -i "$i"
done

rm -f /tmp/image-list.txt

[root@master ~]# chmod +x image-load.sh && ./image-load.sh 

The steps above must be performed on both the master and the worker nodes!
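
A sketch for copying the bundle and loader script from the master to the workers instead of downloading them twice (assumes root SSH access and the hostnames configured earlier):

[root@master ~]# for h in worker1 worker2; do
    scp kubeadm-basic.images.tar.gz image-load.sh $h:/root/
    ssh $h 'cd /root && tar -xf kubeadm-basic.images.tar.gz && bash image-load.sh'
done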

Initialize the master node

[root@master ~]# kubeadm config print init-defaults > kubeadm-config.yaml

[root@master ~]# vim kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.182.100
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: master
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.15.1
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
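
Before running init, one can cross-check that every image kubeadm expects is already present from the offline bundle:

[root@master ~]# kubeadm config images list --config kubeadm-config.yaml
[root@master ~]# docker images | grep k8s.gcr.io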

[root@master ~]# kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs | tee kubeadm-init.log

......

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.182.100:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:fedd3529e381951629f79fe0a5afa1ea57ba01a3f97d36a4a8c6805fbd579f4a 

[root@master ~]# mkdir -p $HOME/.kube && sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config && sudo chown $(id -u):$(id -g) $HOME/.kube/config 

[root@master ~]# kubectl get node
NAME      STATUS   ROLES    AGE     VERSION
master    NotReady    master   18h     v1.15.1

The node is NotReady at this point because the flannel network has not been deployed yet.

Deploy the pod network (master node only)

[root@master ~]# mkdir install-k8s

[root@master ~]# mv kubeadm-config.yaml kubeadm-init.log kubernetes.conf install-k8s/

[root@master ~]# mv install-k8s/ /usr/local/ && cd /usr/local/install-k8s/

[root@master ~]# mkdir core && mv * core/

[root@master ~]# mkdir plugin && cd plugin

[root@master ~]# mkdir flannel && cd flannel/

# Pin the domain that hosts the flannel manifest so it resolves
[root@master ~]# vim /etc/hosts
151.101.108.133 raw.githubusercontent.com

# Download the flannel manifest
[root@master ~]# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

[root@master ~]# kubectl create -f kube-flannel.yml
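
The flannel and coredns pods take a moment to start; watch the kube-system namespace until everything is Running before re-checking the node:

[root@master ~]# kubectl get pod -n kube-system -w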

[root@master ~]# kubectl get node
NAME      STATUS   ROLES    AGE     VERSION
master    Ready    master   18h     v1.15.1

Join the worker nodes to the cluster (run on each worker node)

# Simply run the join command printed in the install log

[root@worker1 ~]# kubeadm join 192.168.182.100:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:fedd3529e381951629f79fe0a5afa1ea57ba01a3f97d36a4a8c6805fbd579f4a 

[root@worker2 ~]# kubeadm join 192.168.182.100:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:fedd3529e381951629f79fe0a5afa1ea57ba01a3f97d36a4a8c6805fbd579f4a 
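
The bootstrap token above expires after 24 hours (ttl: 24h0m0s in the config); if a node joins later, mint a fresh join command on the master:

[root@master ~]# kubeadm token create --print-join-command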

[root@master flannel]# kubectl get nodes
NAME      STATUS   ROLES    AGE     VERSION
master    Ready    master   18h     v1.15.1
worker1   Ready    <none>   4h43m   v1.15.1
worker2   Ready    <none>   17h     v1.15.1

4. Building a Private Registry with Harbor

Deploy Docker on the Harbor host and modify its configuration file, then distribute the file to the other nodes (see the sketch after the file)

[root@harbor ~]# cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "insecure-registries": ["https://hub.hc.com"]
}
EOF
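
A sketch for distributing this file to the cluster nodes and restarting Docker there (assumes root SSH access and name resolution from the Harbor host):

[root@harbor ~]# for h in master worker1 worker2; do
    scp /etc/docker/daemon.json $h:/etc/docker/daemon.json
    ssh $h 'systemctl restart docker'
done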

Upload the Harbor offline installer and the docker-compose binary

[root@harbor ~]# mv docker-compose /usr/local/bin/ && chmod +x /usr/local/bin/docker-compose

[root@harbor ~]# tar -xvf harbor-offline-installer-v1.2.0.tgz

[root@harbor ~]# mv harbor /usr/local/ && cd /usr/local/harbor
[root@harbor harbor]# vim harbor.cfg

......

hostname = hub.hc.com

ui_url_protocol = https
......

[root@harbor harbor]# mkdir -p /data/cert && cd /data/cert

# Generate a passphrase-protected private key
[root@harbor cert]# openssl genrsa -des3 -out server.key 2048

# Create a signing request; answer the Common Name prompt with hub.hc.com
[root@harbor cert]# openssl req -new -key server.key -out server.csr

# Back up the key, then strip its passphrase so the registry can start unattended
[root@harbor cert]# cp server.key server.key.org

[root@harbor cert]# openssl rsa -in server.key.org -out server.key

# Self-sign the certificate, valid for one year
[root@harbor cert]# openssl x509 -req -days 365 -in server.csr -signkey server.key -out server.crt
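
A non-interactive equivalent of the five commands above, for reference (subject fields other than the CN are placeholders; the CN must be hub.hc.com or Docker will reject the certificate):

[root@harbor cert]# openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
    -keyout server.key -out server.crt \
    -subj "/C=CN/ST=Beijing/L=Beijing/O=harbor/CN=hub.hc.com"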

[root@harbor cert]# cd /usr/local/harbor && ./install.sh

Browse to https://hub.hc.com/harbor/sign-in and log in with username admin and password Harbor12345.

Upload an image from a client node to test the registry

# Add the hosts mapping on every node
[root@worker1 ~]# echo "192.168.182.150 hub.hc.com" >>/etc/hosts

# Log in to Harbor
[root@worker1 ~]# docker login https://hub.hc.com

# Pull a test image
[root@worker1 ~]# docker pull wangyanglinux/myapp:v1

# Retag the image and push it to Harbor
[root@worker1 ~]# docker tag wangyanglinux/myapp:v1 hub.hc.com/library/myapp:v1

[root@worker1 ~]# docker push hub.hc.com/library/myapp:v1

5. Testing the Kubernetes Cluster

[root@master ~]# kubectl run nginx-deployment --image=hub.hc.com/library/myapp:v1 --port=80 --replicas=1

kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx-deployment created

[root@master ~]# kubectl get deployment
NAME               READY   UP-TO-DATE   AVAILABLE   AGE
nginx-deployment   1/1     1            1           26s

[root@master ~]# kubectl get rs
NAME                        DESIRED   CURRENT   READY   AGE
nginx-deployment-fdc8d884   1         1         1       30s

[root@master ~]# kubectl get pod
NAME                              READY   STATUS    RESTARTS   AGE
nginx-deployment-fdc8d884-px99m   1/1     Running   0          34s

[root@master ~]# kubectl get pod -o wide
NAME                              READY   STATUS    RESTARTS   AGE   IP           NODE      NOMINATED NODE   READINESS GATES
nginx-deployment-fdc8d884-px99m   1/1     Running   0          37s   10.244.1.2   worker1   <none>           <none>

[root@master ~]# curl 10.244.1.2/hostname.html

nginx-deployment-fdc8d884-px99m

# Scale the deployment up to 3 replicas
[root@master ~]# kubectl scale --replicas=3 deployment/nginx-deployment

[root@master ~]# kubectl get pod
NAME                              READY   STATUS    RESTARTS   AGE
nginx-deployment-fdc8d884-br79p   1/1     Running   0          117s
nginx-deployment-fdc8d884-pq7zl   1/1     Running   0          117s
nginx-deployment-fdc8d884-px99m   1/1     Running   0          5m31s