Kubernetes Installation and Deployment

Hostname        IP               Role
--------------  ---------------  -----------------------------------------------------------
master          192.168.0.108    control-plane node, NTP client
node1           192.168.0.107    worker node, NTP client
node2           192.168.0.104    worker node, NTP client
node3           192.168.0.105    worker node, NTP client
docker-harbor   192.168.0.111    Ansible control host, Docker image registry (Harbor), NTP server, custom yum repository

1. Environment check

# Run the following commands to verify that docker-harbor can log in to each host over SSH without a password
[root@docker-harbor ~]# ssh master
[root@docker-harbor ~]# ssh node1
[root@docker-harbor ~]# ssh node2
[root@docker-harbor ~]# ssh node3
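
If password-less SSH has not been configured yet, a minimal sketch (assuming root SSH login is allowed on every node) is:

[root@docker-harbor ~]# ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa                          # generate a key pair without a passphrase
[root@docker-harbor ~]# for h in master node1 node2 node3; do ssh-copy-id root@$h; done   # copy the public key to each node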

2. Environment preparation

2.1 Write the yaml file
Prepare each node: stop the firewall, disable SELinux, and turn off swap.
This article uses Ansible to install the k8s cluster; the prerequisite is that Ansible can log in to the node hosts over SSH without a password.
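
The playbooks below target host groups named master and node, so the Ansible inventory on docker-harbor (for example /etc/ansible/hosts) is assumed to contain something along these lines (the article does not show the inventory):

[master]
master

[node]
node1
node2
node3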

cat >  /root/yaml/init.yaml << EOF
---
- hosts: master,node               # manage master and node1-node3 remotely
  tasks:
    - name: disable selinux        # permanently disable SELinux; when this file changes, the handler also turns it off for the current session
      lineinfile:
        path: /etc/selinux/config
        regexp: '^SELINUX='
        line: SELINUX=disabled
      notify: stop selinux
    - name: stop firewalld          # stop firewalld for the current session
      service:
        name: firewalld
        state: stopped
      notify: disable firewalld
    - name: delete Default yum file    # remove the default yum repository configuration files
      shell: rm -rf /etc/yum.repos.d/*
    - name: copy yum file              # copy the DVD-based yum repo file to each host; the yum server points to docker-harbor
      copy:
        src: /etc/yum.repos.d/mydvd.repo
        dest: /etc/yum.repos.d/
    - name: mount dvd                  # mount the installation DVD used by the mydvd repo
      shell: mount /dev/sr0 /media
      notify: mountpath
  handlers:
  - name: stop selinux            # turn SELinux off for the current session; skipped if the config file did not change
    shell: setenforce 0
  - name: disable firewalld         # keep firewalld disabled on boot
    service:
      name: firewalld
      enabled: no
  - name: mountpath               # mount the yum DVD automatically at boot
    shell: echo /dev/sr0 /media iso9660 defaults 0 0 >> /etc/fstab
EOF
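
The file /etc/yum.repos.d/mydvd.repo copied by the playbook is not shown in this article. A repo file consistent with the DVD mount above might look roughly like this (an assumption, not the author's exact file):

[mydvd]
name=Local CentOS DVD
baseurl=file:///media
enabled=1
gpgcheck=0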

2.2 Run the yaml file with ansible-playbook
[root@docker-harbor yaml]# ansible-playbook init.yaml

3. Configure the master node and initialize the cluster

3.1 Write the yaml file

cat >  /root/yaml/install_k8s_master.yaml << EOF
---
- hosts: master
  vars:                                              # define Ansible variables
    master: '192.168.0.108:6443'                     # address and port of the master's API server
    image: 'registry.aliyuncs.com/google_containers' # image repository to use (here, the Aliyun mirror)
    version: 'v1.20.4'                               # Kubernetes version
    service_ip: '10.10.0.0/16'                       # CIDR for Kubernetes Services
    pod_network: '10.244.0.0/16'                     # CIDR for the pod network
  tasks:
    - name: create docker directory # create the directory for the docker daemon configuration file first
      file:
        path: /etc/docker
        state: directory
    - name: copy config                         # copy the required configuration files to the master
      copy:
        src: "{{item.src}}"
        dest: "{{item.dest}}"
      loop:
        - { src: "/root/yaml/file/hosts", dest: "/etc/hosts" }                    #拷贝本地hosts域名解析文件到各个master节点
        - { src: "/root/yaml/file/ipvs.conf", dest: "/etc/modules-load.d" }       #拷贝加入ip_vs模块配置文件
        - { src: "/root/yaml/file/k8s.conf", dest: "/etc/sysctl.d/" }             #拷贝内核参数配置文件
        - { src: "/root/yaml/file/daemon.json", dest: "/etc/docker" }             #拷贝docker配置文件
        - { src: "/root/yaml/file/chrony.conf", dest: "/etc" }                    #拷贝NTP时间同步配置文件
        - { src: "/etc/yum.repos.d/centos-ali.repo", dest: "/etc/yum.repos.d" }   #拷贝ali_yum源
        - { src: "/etc/yum.repos.d/docker-ce.repo", dest: "/etc/yum.repos.d" }    #拷贝docker_yum源
        - { src: "/etc/yum.repos.d/kubernetes.repo", dest: "/etc/yum.repos.d" }   #拷贝kubernetes_yum源
    - shell: sysctl --system     # reload kernel parameters
    - name: install server       # install packages
      yum:
         name:
            - vim                 # vim text editor
            - bash-completion     # bash tab completion
            - chrony              # NTP time synchronization
            - ipset               # ipset tool
            - ipvsadm             # ipvsadm tool
            - docker-ce           # docker-ce container runtime
            - kubeadm-1.20.4      # kubeadm
            - kubelet-1.20.4      # kubelet
            - kubectl-1.20.4      # kubectl
    - name: Automatically start service    # start the required services and enable them at boot
      service:
        name: "{{item}}"
        state: started
        enabled: yes
      loop:
        - chronyd                          # NTP time service
        - systemd-modules-load.service     # loads the ip_vs kernel modules at boot
        - docker.service                   # docker service
        - kubelet.service                  # kubelet service
    - name: Init cluster        # initialize the cluster and create a non-expiring join token
      shell: "{{item}}"
      loop:
        - kubeadm init --control-plane-endpoint='{{ master }}' --image-repository '{{ image }}' --kubernetes-version '{{ version }}' --service-cidr='{{ service_ip }}' --pod-network-cidr='{{ pod_network }}' --upload-certs
        - mkdir -p $HOME/.kube
        - sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
        - sudo chown $(id -u):$(id -g) $HOME/.kube/config
        - kubeadm token delete `kubeadm token list | awk 'NR==2{print $1}'`
        - kubeadm token create --ttl=0 --print-join-command > /opt/token.txt # save the join command to a file; a node joins the cluster by running the command in this file
    - name: Save Certificate    # fetch the join-command file back to the docker-harbor control host under /opt
      fetch:
        src: /opt/token.txt
        dest: /opt
    - shell: rm -rf /opt/token.txt # remove the join-command file from the master
EOF
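
The files under /root/yaml/file/ referenced by the copy task are not shown in this article. Minimal versions that match their usual purpose might look like the following (assumed contents, not the author's exact files):

# /etc/modules-load.d/ipvs.conf -- kernel modules needed for kube-proxy in IPVS mode
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack

# /etc/sysctl.d/k8s.conf -- kernel parameters required for Kubernetes networking
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1

# /etc/docker/daemon.json -- trust the local Harbor registry and use the systemd cgroup driver
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "insecure-registries": ["192.168.0.111"]
}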

3.2 Run the yaml file with ansible-playbook

[root@docker-harbor yaml]# ansible-playbook install_k8s_master.yaml

3.3 Verify the master installation

[root@docker-harbor yaml]# ssh master
Last login: Sat Sep 9 09:07:06 2023 from 192.168.0.111
[root@master ~]# kubectl get nodes
NAME     STATUS     ROLES                  AGE     VERSION
master   NotReady   control-plane,master   3m52s   v1.20.4

The master shows NotReady because no pod network add-on has been installed yet; it becomes Ready once flannel is deployed in section 5.

4. Install the worker nodes

4.1 Write the yaml file

cat >  /root/yaml/install_k8s_node.yaml << EOF
---
- hosts: node
  tasks:
    - name: create docker directory
      file:
        path: /etc/docker
        state: directory
    - name: copy config
      copy:
        src: "{{item.src}}"
        dest: "{{item.dest}}"
      loop:
        - { src: "/root/yaml/file/hosts", dest: "/etc/hosts" }
        - { src: "/root/yaml/file/ipvs.conf", dest: "/etc/modules-load.d" }
        - { src: "/root/yaml/file/k8s.conf", dest: "/etc/sysctl.d/" }
        - { src: "/root/yaml/file/daemon.json", dest: "/etc/docker" }
        - { src: "/root/yaml/file/chrony.conf", dest: "/etc" }
        - { src: "/etc/yum.repos.d/centos-ali.repo", dest: "/etc/yum.repos.d" }
        - { src: "/etc/yum.repos.d/docker-ce.repo", dest: "/etc/yum.repos.d" }
        - { src: "/etc/yum.repos.d/kubernetes.repo", dest: "/etc/yum.repos.d" }
    - shell: sysctl --system
    - name: install server
      yum:
         name:
           - vim
           - bash-completion
           - chrony
           - ipset
           - ipvsadm
           - docker-ce
           - kubeadm-1.20.4
           - kubelet-1.20.4
           - kubectl-1.20.4
    - name: Automatically start service
      service:
        name: "{{item}}"
        state: started
        enabled: yes
      loop:
        - chronyd
        - systemd-modules-load.service
        - docker.service
        - kubelet.service
    - name: copy join file             # copy the join-command file from the docker-harbor control host to the node
      copy:
        src: /opt/master/opt/token.txt
        dest: /opt/
    - name: Join the cluster           # run the join command, then remove the file
      shell: "{{item}}"
      loop:
        - $(cat /opt/token.txt)
        - rm -rf /opt/token.txt
EOF
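
For reference, /opt/token.txt holds the output of kubeadm token create --print-join-command, i.e. a single kubeadm join line of roughly this shape (the token and hash here are placeholders, not real values):

kubeadm join 192.168.0.108:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>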

4.2 Run the yaml file with ansible-playbook

[root@docker-harbor yaml]# ansible-playbook install_k8s_node.yaml

4.3 Verify the node installation

[root@docker-harbor yaml]# ssh master
Last login: Sat Sep 9 09:07:06 2023 from 192.168.0.111
[root@master ~]# kubectl get nodes
NAME     STATUS     ROLES                  AGE     VERSION
master   NotReady   control-plane,master   3m52s   v1.20.4
node1    NotReady   <none>                 14m     v1.20.4
node2    NotReady   <none>                 14m     v1.20.4
node3    NotReady   <none>                 14m     v1.20.4

5. Install flannel

5.1 Download the kube-flannel.yml file

wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

If the download fails with an error like the one below, try editing /etc/resolv.conf and pointing the nameserver at 8.8.8.8.

--2023-09-09 17:21:38--  https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 0.0.0.0, ::
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|0.0.0.0|:443... connected.
Unable to establish SSL connection.

Edit /etc/resolv.conf:

[root@docker-harbor ~]# cat /etc/resolv.conf # before the change
#Generated by NetworkManager
nameserver 202.96.128.86
nameserver 192.168.195.254


[root@docker-harbor ~]# cat /etc/resolv.conf # after the change
#Generated by NetworkManager
#nameserver 202.96.128.86
#nameserver 192.168.195.254
nameserver 8.8.8.8

5.2 After the download, check which images the manifest uses
[root@docker-harbor ~]# grep image: kube-flannel.yml
image: docker.io/flannel/flannel-cni-plugin:v1.2.0
image: docker.io/flannel/flannel:v0.22.2
image: docker.io/flannel/flannel:v0.22.2

5.3 Install kube-flannel.yml
Pull the flannel images in advance:

for i in $(grep image: kube-flannel.yml | awk '{print $2}' | sort -u); do docker pull $i; done

Save the images so they can be copied to each cluster node:

docker save flannel/flannel-cni-plugin:v1.2.0 -o flannel-cni-plugin:v1.2.0
docker save flannel/flannel:v0.22.2 -o flannel:v0.22.2

5.4 Write the yaml file

cat >  /root/yaml/install_flannel.yaml << EOF
---
- hosts: master,node
  tasks:
    - name: copy flannel image archives to every node
      copy:
        src: "{{item.src}}"
        dest: "{{item.dest}}"
      loop:
        - { src: "/root/flenn/flannel-cni-plugin:v1.2.0", dest: "/root" }
        - { src: "/root/flenn/flannel:v0.22.2", dest: "/root" }
    - name: load the flannel images into docker
      shell: "{{item}}"
      loop:
        - docker load -i flannel-cni-plugin:v1.2.0
        - docker load -i flannel:v0.22.2
    - copy:
        src: /root/flenn/kube-flannel.yml
        dest: /root
      when: ansible_hostname == "master"
    - shell: kubectl apply -f kube-flannel.yml
      when: ansible_hostname == "master"
EOF
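
5.5 Run the yaml file with ansible-playbook

Run it the same way as the previous playbooks:

[root@docker-harbor yaml]# ansible-playbook install_flannel.yaml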

6. Verify the cluster

[root@master ~]# kubectl get nodes
NAME     STATUS   ROLES                  AGE     VERSION
master   Ready    control-plane,master   3m52s   v1.20.4
node1    Ready    <none>                 14m     v1.20.4
node2    Ready    <none>                 14m     v1.20.4
node3    Ready    <none>                 14m     v1.20.4
[root@master ~]# kubectl get pod -n kube-system
NAME                             READY   STATUS    RESTARTS   AGE
coredns-7f89b7bc75-nk8ws         1/1     Running   0          49m
coredns-7f89b7bc75-srltb         1/1     Running   0          49m
etcd-master                      1/1     Running   1          50m
kube-apiserver-master            1/1     Running   1          50m
kube-controller-manager-master   1/1     Running   1          50m
kube-proxy-5nxkg                 1/1     Running   1          45m
kube-proxy-62v68                 1/1     Running   1          45m
kube-proxy-rfxwq                 1/1     Running   1          45m
kube-proxy-w24sd                 1/1     Running   1          49m
kube-scheduler-master            1/1     Running   1          50m
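
In recent versions of the manifest the flannel pods run in their own kube-flannel namespace (older manifests placed them in kube-system); they can be checked with:

[root@master ~]# kubectl get pod -n kube-flannel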

7. Deploy a test resource to verify the cluster

cat >  /root/yaml/myapache.yaml << EOF
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: myapache
  annotations:
    kubernetes.io/change-cause: httpd.v1
spec:
  selector:
    matchLabels:
      myapp: httpd
  replicas: 1
  template:
    metadata:
      labels:
        myapp: httpd
    spec:
      containers:
      - name: webcluster
        image: 192.168.0.111/library/web:httpd
        stdin: false
        tty: false
        ports:
        - protocol: TCP
          containerPort: 80
      restartPolicy: Always
EOF
kubectl apply -f myapache.yaml
kubectl get pod -o wide
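
To confirm the Deployment has finished rolling out, standard kubectl checks such as the following can be used:

kubectl rollout status deployment/myapache
kubectl get deployment myapache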