Enterprise Operations -- Docker / Kubernetes Cluster Deployment

Kubernetes Cluster Deployment

Environment
  1. IP masquerading so the virtual machines can reach the internet:
[root@foundation12 ~]# iptables -t nat -I POSTROUTING -s 172.25.12.0/24 -j MASQUERADE
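A quick way to confirm the rule took effect, and (optionally, assuming the iptables-services package is installed) to persist it across reboots:

  iptables -t nat -nL POSTROUTING      # the MASQUERADE rule for 172.25.12.0/24 should be listed
  service iptables save                # optional: save the rule (needs iptables-services)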

  2. Deploy Harbor: https://blog.csdn.net/weixin_56892849/article/details/118941730
  3. Generate certificates: https://blog.csdn.net/weixin_56892849/article/details/118964835
ip            host      role
172.25.12.2   server2   master
172.25.12.3   server3   node
172.25.12.4   server4   node
Add name resolution for reg.westos.org and set up the yum repository environment:
[root@server1 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
172.25.12.250	foundation12.ilt.example.com
172.25.12.1	server1 reg.westos.org
172.25.12.2	server2
172.25.12.3	server3
172.25.12.4	server4
172.25.12.5	server5
172.25.12.6	server6
172.25.12.7	server7
172.25.12.8	server8
[root@server1 yum.repos.d]# cd
[root@server1 ~]# scp /etc/hosts server2:/etc/
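server3 and server4 need the same /etc/hosts file; a small loop (a sketch, assuming root SSH access to each node) saves repeating the command:

  for h in server2 server3 server4; do
      scp /etc/hosts $h:/etc/
  done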
   

[root@server1 yum.repos.d]# cat docker.repo 
[docker]
name=docker-ce
baseurl=http://172.25.12.250/docker-ce
gpgcheck=0

[root@server1 yum.repos.d]# cat dvd.repo 
[dvd]
name=dvd
baseurl=http://172.25.12.250/rhel7.6
gpgcheck=0

[root@server1 yum.repos.d]# scp dvd.repo docker.repo server2:/etc/yum.repos.d/
dvd.repo                                      100%   66    45.3KB/s   00:00    
docker.repo                                   100%   76    68.6KB/s   00:00    
[root@server1 yum.repos.d]# scp dvd.repo docker.repo server3:/etc/yum.repos.d/
dvd.repo                                      100%   66    45.2KB/s   00:00    
docker.repo                                   100%   76    68.3KB/s   00:00    
[root@server1 yum.repos.d]# scp dvd.repo docker.repo server4:/etc/yum.repos.d/
dvd.repo                                      100%   66    42.5KB/s   00:00    
docker.repo                                   100%   76    73.2KB/s   00:00    
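Before installing anything, it is worth confirming that each node can actually read the repositories, for example:

  yum clean all
  yum repolist      # should list the dvd and docker repos served from 172.25.12.250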

[root@server2 docker]# cd /etc/yum.repos.d/
[root@server2 yum.repos.d]# ls
docker.repo  dvd.repo  k8s.repo  redhat.repo
[root@server2 yum.repos.d]# cat k8s.repo 
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
gpgcheck=0

[root@server2 yum.repos.d]# scp k8s.repo server3:/etc/yum.repos.d/
k8s.repo                        100%  129   119.6KB/s   00:00    
[root@server2 yum.repos.d]# scp k8s.repo server4:/etc/yum.repos.d/ 
k8s.repo                        100%  129   110.6KB/s   00:00    
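This deployment uses v1.21.3; to see which package versions the Aliyun mirror offers before installing, something like the following works:

  yum list --showduplicates kubeadm kubelet kubectl | grep 1.21.3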

On the physical host (foundation12):

[root@foundation12 ~]# cd /var/www/html/
[root@foundation12 html]# ls
3000                         docker-ce
4.0                          harbor-offline-installer-v1.10.1.tgz
apache-tomcat-7.0.90.tar.gz  nginx-1.20.1.tar.gz
compose                      rhel7.6
[root@foundation12 html]# ls docker-ce/
containerd.io-1.2.10-3.2.el7.x86_64.rpm
containerd.io-1.2.13-3.1.el7.x86_64.rpm
containerd.io-1.2.13-3.2.el7.x86_64.rpm
containerd.io-1.3.7-3.1.el7.x86_64.rpm
containerd.io-1.4.4-3.1.el7.x86_64.rpm
container-selinux-2.77-1.el7.noarch.rpm
docker-ce-18.09.9-3.el7.x86_64.rpm
docker-ce-19.03.11-3.el7.x86_64.rpm
docker-ce-19.03.12-3.el7.x86_64.rpm
docker-ce-19.03.13-3.el7.x86_64.rpm
docker-ce-19.03.15-3.el7.x86_64.rpm
docker-ce-19.03.5-3.el7.x86_64.rpm
docker-ce-19.03.8-3.el7.x86_64.rpm
docker-ce-cli-18.09.9-3.el7.x86_64.rpm
docker-ce-cli-19.03.11-3.el7.x86_64.rpm
docker-ce-cli-19.03.12-3.el7.x86_64.rpm
docker-ce-cli-19.03.13-3.el7.x86_64.rpm
docker-ce-cli-19.03.15-3.el7.x86_64.rpm
docker-ce-cli-19.03.5-3.el7.x86_64.rpm
docker-ce-cli-19.03.8-3.el7.x86_64.rpm
repodata

Install docker, kubeadm, kubelet, and kubectl; start docker and kubelet and enable them at boot

[root@server1 ~]# yum install -y docker-ce
[root@server2 ~]# yum install -y docker-ce
[root@server3 ~]# yum install -y docker-ce
[root@server4 ~]# yum install -y docker-ce

[root@server1 ~]# systemctl  enable --now docker
[root@server2 ~]# systemctl  enable --now docker
[root@server3 ~]# systemctl  enable --now docker
[root@server4 ~]# systemctl  enable --now docker


[root@server2 ~]# yum install -y kubelet kubeadm.x86_64 kubectl.x86_64 
[root@server2 ~]# systemctl  enable --now kubelet.service 
[root@server3 ~]# yum install -y kubelet kubeadm.x86_64 kubectl.x86_64 
[root@server3 ~]# systemctl  enable --now kubelet.service
[root@server4 ~]# yum install -y kubelet kubeadm.x86_64 kubectl.x86_64 
[root@server4 ~]# systemctl  enable --now kubelet.service 
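By default yum installs the newest packages available in the Kubernetes repo. To pin the exact version used in this article (a sketch; the version suffix is assumed to match the Aliyun package naming), the install can be written as:

  yum install -y kubelet-1.21.3 kubeadm-1.21.3 kubectl-1.21.3
  systemctl enable --now kubelet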

Modify /etc/sysctl.d/k8s.conf and docker.conf

[root@server1 sysctl.d]# ls
99-sysctl.conf  docker.conf  k8s.conf
[root@server1 sysctl.d]# cat k8s.conf 
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
[root@server1 sysctl.d]# scp docker.conf server2:/etc/sysctl.d/
docker.conf                                         100%   79    77.6KB/s   00:00    
[root@server1 sysctl.d]# scp docker.conf server3:/etc/sysctl.d/
docker.conf                                         100%   79    79.5KB/s   00:00    
[root@server1 sysctl.d]# scp docker.conf server4:/etc/sysctl.d/
docker.conf                                         100%   79    87.1KB/s   00:00    

[root@server1 ~]# sysctl --system
[root@server2 ~]# sysctl --system
[root@server3 ~]# sysctl --system
[root@server4 ~]# sysctl --system
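The two bridge sysctls only exist when the br_netfilter kernel module is loaded; if sysctl --system reports the keys as missing, load the module and make it persistent (a minimal sketch) before re-running sysctl:

  modprobe br_netfilter
  echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
  sysctl --system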
Disable the swap partition and comment out the swap entry in /etc/fstab

[root@server2 ~]# swapoff -a 
[root@server2 ~]# vim /etc/fstab 

#/dev/mapper/rhel-swap   swap                    swap    defaults        0 0

[root@server3 ~]# swapoff -a 
[root@server3 ~]# vim /etc/fstab 

#/dev/mapper/rhel-swap   swap                    swap    defaults        0 0

[root@server4 ~]# swapoff -a 
[root@server4 ~]# vim /etc/fstab 

#/dev/mapper/rhel-swap   swap                    swap    defaults        0 0
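Instead of editing /etc/fstab by hand on every node, the swap entry can also be commented out non-interactively, for example:

  swapoff -a
  sed -i '/\sswap\s/s/^/#/' /etc/fstab      # prefix any line containing a swap entry with #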

Configure the docker daemon options (registry mirror and systemd cgroup driver)

[root@server2 docker]# cat daemon.json 
{
  "registry-mirrors": ["https://reg.westos.org"],
  "exec-opts": ["native.cgroupdriver=systemd"]

}
[root@server2 docker]# pwd
/etc/docker
[root@server2 docker]# scp daemon.json server3:/etc/docker/
[root@server2 docker]# scp daemon.json server4:/etc/docker/

[root@server2 docker]# systemctl  restart docker
[root@server3 docker]# systemctl  restart docker
[root@server4 docker]# systemctl  restart docker
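After the restart, check that docker really switched to the systemd cgroup driver, since kubelet expects the two to match:

  docker info | grep -i 'cgroup driver'      # should print: Cgroup Driver: systemd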

Create the k8s project in Harbor

(screenshot: the k8s project created in the Harbor web UI)

Obtain the images below (pulled from the internet, here loaded from the offline archive k8s-1.21.3.tar.gz) and push them to the k8s project in the registry:
[root@server2 ~]# ls
k8s-1.21.3.tar.gz
[root@server2 ~]# docker load -i k8s-1.21.3.tar.gz 
[root@server2 ~]# docker images
REPOSITORY                                   TAG                 IMAGE ID            CREATED             SIZE
reg.westos.org/k8s/kube-apiserver            v1.21.3             3d174f00aa39        8 days ago          126MB
reg.westos.org/k8s/kube-scheduler            v1.21.3             6be0dc1302e3        8 days ago          50.6MB
reg.westos.org/k8s/kube-proxy                v1.21.3             adb2816ea823        8 days ago          103MB
reg.westos.org/k8s/kube-controller-manager   v1.21.3             bc2bb319a703        8 days ago          120MB
flannel                                      v0.14.0             8522d622299c        2 months ago        67.9MB
reg.westos.org/k8s/pause                     3.4.1               0f8457a4c2ec        6 months ago        683kB
reg.westos.org/k8s/coredns                   v1.8.0              296a6d5035e2        9 months ago        42.5MB
reg.westos.org/k8s/etcd                      3.4.13-0            0369cf4303ff        11 months ago       253MB

[root@server2 certs.d]# mkdir -p /etc/docker/certs.d/reg.westos.org
[root@server3 certs.d]# mkdir -p /etc/docker/certs.d/reg.westos.org
[root@server4 certs.d]# mkdir -p /etc/docker/certs.d/reg.westos.org
[root@server1 reg.westos.org]# scp ca.crt server2:/etc/docker/certs.d/reg.westos.org/
[root@server1 reg.westos.org]# scp ca.crt server3:/etc/docker/certs.d/reg.westos.org/
[root@server1 reg.westos.org]# scp ca.crt server4:/etc/docker/certs.d/reg.westos.org/

[root@server2 certs.d]# docker login  reg.westos.org
Username: admin
Password: 
WARNING! Your password will be stored unencrypted in /root/.docker/config.json.
Configure a credential helper to remove this warning. See
https://docs.docker.com/engine/reference/commandline/login/#credentials-store

Login Succeeded
[root@server2 certs.d]# docker images | grep ^reg.westos.org/k8s | awk '{system("docker push "$1":"$2"")}'
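The awk one-liner simply runs docker push for every local image whose repository starts with reg.westos.org/k8s; the same thing written as a plain loop is easier to read:

  for img in $(docker images --format '{{.Repository}}:{{.Tag}}' | grep '^reg.westos.org/k8s'); do
      docker push "$img"
  done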

(screenshot: the pushed images listed under the k8s project in Harbor)

Initialize the master node
[root@server2 ~]# kubeadm init --pod-network-cidr=10.244.0.0/16 --image-repository reg.westos.org/k8s

error execution phase preflight: [preflight] Some fatal errors occurred:
	[ERROR NumCPU]: the number of available CPUs 1 is less than the required 2
	[ERROR Mem]: the system RAM (991 MB) is less than the minimum 1700 MB

Cause of the error: not enough CPUs and memory.
Increase the VM to 2 CPUs and 2048 MB of memory.
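Raising the VM to 2 CPUs and 2048 MB is the proper fix. In a disposable lab the same checks can also be skipped with kubeadm's --ignore-preflight-errors flag (not recommended outside testing):

  kubeadm init --pod-network-cidr=10.244.0.0/16 \
    --image-repository reg.westos.org/k8s \
    --ignore-preflight-errors=NumCPU,Mem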

(screenshots: VM settings raised to 2 vCPUs and 2048 MB of memory)
Initialization succeeds; follow the prompts at the end of the output to continue.

[kiosk@foundation12 images]$ ssh root@172.25.12.2
root@172.25.12.2's password: 
Last login: Fri Jul 23 23:58:52 2021 from foundation12.ilt.example.com
[root@server2 ~]# kubeadm init --pod-network-cidr=10.244.0.0/16 --image-repository reg.westos.org/k8s
W0724 06:18:52.708140    3774 version.go:102] could not fetch a Kubernetes version from the internet: unable to get URL "https://dl.k8s.io/release/stable-1.txt": Get "https://dl.k8s.io/release/stable-1.txt": EOF
W0724 06:18:52.708263    3774 version.go:103] falling back to the local client version: v1.21.3
[init] Using Kubernetes version: v1.21.3
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local server2] and IPs [10.96.0.1 172.25.12.2]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost server2] and IPs [172.25.12.2 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost server2] and IPs [172.25.12.2 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 18.502704 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.21" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node server2 as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node server2 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: 0oa4p4.aiqrza9kmx50f2v2
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 172.25.12.2:6443 --token 0oa4p4.aiqrza9kmx50f2v2 \
	--discovery-token-ca-cert-hash sha256:19ecdbeceee6f2d44b90995b849273e07a118c7faf1e7a0d83b904d0f4b73ac5 

Add 'export KUBECONFIG=/etc/kubernetes/admin.conf' to .bash_profile

[root@server2 ~]# vim .bash_profile 
[root@server2 ~]# cat .bash_profile 
# .bash_profile

# Get the aliases and functions
if [ -f ~/.bashrc ]; then
	. ~/.bashrc
fi

# User specific environment and startup programs

PATH=$PATH:$HOME/bin

export PATH
export KUBECONFIG=/etc/kubernetes/admin.conf
[root@server2 ~]# echo "source <(kubectl completion bash)" >> ~/.bashrc 
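To pick up the new variable in the current shell and confirm the API server answers:

  source ~/.bash_profile
  kubectl cluster-info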
Check the nodes: the master is NotReady because no pod network has been deployed yet
[root@server2 ~]# kubectl get nodes
NAME      STATUS     ROLES                  AGE     VERSION
server2   NotReady   control-plane,master   3m54s   v1.21.3
Deploy the flannel network
[root@server2 ~]# yum install -y wget
[root@server2 ~]# wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
--2021-07-24 21:19:28--  https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.111.133, 185.199.110.133, ...
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 4813 (4.7K) [text/plain]
Saving to: ‘kube-flannel.yml’

100%[===========================================================================>] 4,813       25.2KB/s   in 0.2s   

2021-07-24 21:19:30 (25.2 KB/s) - ‘kube-flannel.yml’ saved [4813/4813]

[root@server2 ~]# vim kube-flannel.yml 

        image: quay.io/coreos/flannel:v0.14.0

[root@server1 reg.westos.org]# docker pull quay.io/coreos/flannel:v0.14.0
v0.14.0: Pulling from coreos/flannel
801bfaa63ef2: Pull complete 
e4264a7179f6: Pull complete 
bc75ea45ad2e: Pull complete 
78648579d12a: Pull complete 
3393447261e4: Pull complete 
071b96dd834b: Pull complete 
4de2f0468a91: Pull complete 
Digest: sha256:4a330b2f2e74046e493b2edc30d61fdebbdddaaedcb32d62736f25be8d3c64d5
Status: Downloaded newer image for quay.io/coreos/flannel:v0.14.0
quay.io/coreos/flannel:v0.14.0
[root@server1 reg.westos.org]# docker tag quay.io/coreos/flannel:v0.14.0 reg.westos.org/library/flannel:v0.14.0
[root@server1 reg.westos.org]# docker push reg.westos.org/library/flannel:v0.14.0
The push refers to repository [reg.westos.org/library/flannel]
814fbd599e1f: Pushed 
8a984b390686: Pushed 
b613d890216c: Pushed 
eb738177d102: Pushed 
2e16188127c8: Pushed 
815dff9e0b57: Pushed 
777b2c648970: Pushed 
v0.14.0: digest: sha256:635d42b8cc6e9cb1dee3da4d5fe8bbf6a7f883c9951b660b725b0ed2c03e6bde size: 1785

(screenshot: the flannel image under the library project in Harbor)

Modify kube-flannel.yml so the flannel image is pulled through the local registry

[root@server2 ~]# vim kube-flannel.yml 
169         image: flannel:v0.14.0
183         image: flannel:v0.14.0
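With both image: lines pointing at flannel:v0.14.0 (resolved through the reg.westos.org mirror configured in daemon.json), the manifest still has to be applied; this step is implied by the kube-flannel pods that show up below:

  kubectl apply -f kube-flannel.yml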


Add the worker (minion) nodes
[root@server3 ~]# kubeadm join 172.25.12.2:6443 --token 0oa4p4.aiqrza9kmx50f2v2 --discovery-token-ca-cert-hash sha256:19ecdbeceee6f2d44b90995b849273e07a118c7faf1e7a0d83b904d0f4b73ac5 
[root@server4 ~]# kubeadm join 172.25.12.2:6443 --token 0oa4p4.aiqrza9kmx50f2v2 --discovery-token-ca-cert-hash sha256:19ecdbeceee6f2d44b90995b849273e07a118c7faf1e7a0d83b904d0f4b73ac5 
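The bootstrap token printed by kubeadm init expires after 24 hours; if another node has to join later, a fresh join command can be generated on the master with:

  kubeadm token create --print-join-command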

All nodes are Ready and all pods are Running: the deployment succeeded.
[root@server2 ~]# kubectl get nodes
NAME      STATUS   ROLES                  AGE   VERSION
server2   Ready    control-plane,master   17h   v1.21.3
server3   Ready    <none>                 22s   v1.21.3
server4   Ready    <none>                 13s   v1.21.3
[root@server2 ~]# kubectl -n kube-system get pod
NAME                              READY   STATUS    RESTARTS   AGE
coredns-7777df944c-4ls4d          1/1     Running   0          17h
coredns-7777df944c-gwxzq          1/1     Running   0          17h
etcd-server2                      1/1     Running   0          17h
kube-apiserver-server2            1/1     Running   0          17h
kube-controller-manager-server2   1/1     Running   0          17h
kube-flannel-ds-9skt8             1/1     Running   0          61s
kube-flannel-ds-sbtr7             1/1     Running   0          65m
kube-flannel-ds-v8sth             1/1     Running   0          52s
kube-proxy-7jlnc                  1/1     Running   0          61s
kube-proxy-hpj4p                  1/1     Running   0          52s
kube-proxy-wwlxs                  1/1     Running   0          17h
kube-scheduler-server2            1/1     Running   0          17h
