Preface
This automated deployment procedure was tested on CentOS 8.2, using the BCLinux repositories together with RPM packages saved from the (now expired) Alibaba Cloud CentOS 8.2 mirror. Use with caution! It is written mainly as a copy-and-paste reference for the configuration files involved.
Note: validate every step against your own environment before modifying anything.
pre-linux: prerequisite environment setup
├── BClinux8.2.repo
├── docker-ce.repo
├── hosts.ini
├── ipvs.conf
├── kubernetes.repo
├── limits.conf
├── package
│ ├── chrony-3.5-1.0.1.an8.x86_64.rpm
│ ├── conntrack-tools-1.4.4-10.el8.x86_64.rpm
│ ├── ipset-7.1-1.el8.x86_64.rpm
│ ├── ipset-libs-7.1-1.el8.x86_64.rpm
│ ├── ipvsadm-1.31-1.el8.x86_64.rpm
│ └── rsync-3.1.3-19.el8.x86_64.rpm
├── pre-linux-init.yml
└── sysctl.conf
# k8s prerequisite configuration
Repo files (three repos)
Note: the three yum repo files are listed together below. Copy each into its own file by name; do not merge them into a single .repo file.
BClinux8.2.repo
[BaseOS]
name=BC-Linux-8.2 - BaseOS
baseurl=http://mirrors.bclinux.org/bclinux/el8.2/BaseOS/x86_64/os/
gpgcheck=0
enabled=1
[AppStream]
name=BC-Linux-8.2 - AppStream
baseurl=http://mirrors.bclinux.org/bclinux/el8.2/AppStream/x86_64/os/
gpgcheck=0
enabled=1
docker-ce.repo
[docker-ce-stable]
name=Docker CE Stable - $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/$basearch/stable
enabled=1
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
[docker-ce-stable-debuginfo]
name=Docker CE Stable - Debuginfo $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/debug-$basearch/stable
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
[docker-ce-stable-source]
name=Docker CE Stable - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/source/stable
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
[docker-ce-test]
name=Docker CE Test - $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/$basearch/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
[docker-ce-test-debuginfo]
name=Docker CE Test - Debuginfo $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/debug-$basearch/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
[docker-ce-test-source]
name=Docker CE Test - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/source/test
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
[docker-ce-nightly]
name=Docker CE Nightly - $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/$basearch/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
[docker-ce-nightly-debuginfo]
name=Docker CE Nightly - Debuginfo $basearch
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/debug-$basearch/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
[docker-ce-nightly-source]
name=Docker CE Nightly - Sources
baseurl=https://mirrors.aliyun.com/docker-ce/linux/centos/$releasever/source/nightly
enabled=0
gpgcheck=1
gpgkey=https://mirrors.aliyun.com/docker-ce/linux/centos/gpg
kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
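After copying the three files into /etc/yum.repos.d/ on a node, a quick check confirms the repos resolve (optional, not part of the playbook):
# rebuild metadata and list the enabled repos
dnf clean all && dnf makecache
dnf repolist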
hosts.ini
[pre_linux]
192.168.200.155 ansible_host=192.168.200.155 ansible_user=root ansible_ssh_pass=123456
192.168.200.156 ansible_host=192.168.200.156 ansible_user=root ansible_ssh_pass=123456
192.168.200.157 ansible_host=192.168.200.157 ansible_user=root ansible_ssh_pass=123456
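Before running the playbook, confirm Ansible can reach every host in the inventory (password-based auth requires sshpass on the control node):
# connectivity check for the pre_linux group
ansible -i hosts.ini pre_linux -m ping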
ipvs.conf
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_nq
ip_vs_sed
ip_vs_ftp
# nf_conntrack_ipv4 was merged into nf_conntrack on EL8 kernels
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
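This file is read by systemd-modules-load.service; after the playbook runs (or a reboot) you can spot-check that the modules actually loaded:
# confirm the IPVS and conntrack modules are present
lsmod | grep -E 'ip_vs|nf_conntrack'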
limits.conf
* soft nofile 655360
* hard nofile 655360
* soft nproc 65535
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
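limits.conf only affects new login sessions; verify from a fresh shell:
# spot-check the raised limits
ulimit -n   # open files
ulimit -u   # processes
ulimit -l   # locked memory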
sysctl.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
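Note that the net.bridge.* keys only exist once the br_netfilter module is loaded (it is not in the ipvs.conf list above), so load it before applying:
# load br_netfilter, apply everything, then verify the critical keys
modprobe br_netfilter
sysctl --system
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward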
pre-linux-init.yml
---
- name: Configure Linux 8.2 System Settings
  hosts: pre_linux
  gather_facts: yes
  become: yes
  tasks:
    - name: Disable firewalld service
      systemd:
        name: firewalld
        state: stopped
        enabled: no

    - name: Disable SELinux if not already disabled
      selinux:
        state: disabled
      when: ansible_selinux.status != "disabled"

    - name: Disable SWAP if it is active
      command: swapoff -a
      when: ansible_swaptotal_mb > 0

    - name: Comment out swap in /etc/fstab if it exists
      replace:
        path: /etc/fstab
        regexp: '^(.*\sswap\s)'
        replace: '# \1'
      when: ansible_swaptotal_mb > 0

    - name: Copy config files and packages
      copy:
        src: "{{ item.src }}"
        dest: "{{ item.dest }}"
        owner: root
        group: root
        mode: '0644'
      loop:
        - { src: "{{ playbook_dir }}/BClinux8.2.repo", dest: "/etc/yum.repos.d/" }
        - { src: "{{ playbook_dir }}/docker-ce.repo", dest: "/etc/yum.repos.d/" }
        - { src: "{{ playbook_dir }}/kubernetes.repo", dest: "/etc/yum.repos.d/" }
        - { src: "{{ playbook_dir }}/limits.conf", dest: "/etc/security/" }
        - { src: "{{ playbook_dir }}/ipvs.conf", dest: "/etc/modules-load.d/" }
        - { src: "{{ playbook_dir }}/sysctl.conf", dest: "/etc/" }
        - { src: "{{ playbook_dir }}/package/chrony-3.5-1.0.1.an8.x86_64.rpm", dest: "/root/pre-linux/" }
        - { src: "{{ playbook_dir }}/package/conntrack-tools-1.4.4-10.el8.x86_64.rpm", dest: "/root/pre-linux/" }
        - { src: "{{ playbook_dir }}/package/ipset-7.1-1.el8.x86_64.rpm", dest: "/root/pre-linux/" }
        - { src: "{{ playbook_dir }}/package/ipvsadm-1.31-1.el8.x86_64.rpm", dest: "/root/pre-linux/" }
        - { src: "{{ playbook_dir }}/package/ipset-libs-7.1-1.el8.x86_64.rpm", dest: "/root/pre-linux/" }
        - { src: "{{ playbook_dir }}/package/rsync-3.1.3-19.el8.x86_64.rpm", dest: "/root/pre-linux/" }

    - name: Install RPM packages
      # shell (not command) so the *.rpm glob expands; path matches the copy dest above
      shell: rpm -i /root/pre-linux/*.rpm --nodeps --force
      args:
        chdir: /root/pre-linux/

    - name: Set timezone to Asia/Shanghai
      command: timedatectl set-timezone Asia/Shanghai

    - name: Apply sysctl settings
      command: sysctl --system

    - name: Enable and start systemd-modules-load.service
      systemd:
        name: systemd-modules-load.service
        state: started
        enabled: yes

    - name: Configure chrony to sync time with Aliyun NTP servers
      lineinfile:
        path: /etc/chrony.conf
        line: "server ntp1.aliyun.com iburst"
        state: present

    - name: Restart chrony service
      systemd:
        name: chronyd
        state: restarted
        enabled: yes
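After the playbook finishes, a few manual spot checks on any node confirm the intended state:
# firewall stopped, SELinux off, swap off, timezone and NTP in sync
systemctl is-active firewalld
getenforce
swapon --show
timedatectl
chronyc sources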
k8s high-availability setup
k8s-ha
├── check_apiserver.sh        # keepalived health-check script
├── haproxy.cfg.j2
├── hosts.ini                 # host inventory
├── k8s-ha-install.yml
├── keepalived.conf.j2
└── perl-devel                # dependency packages for deployment
This playbook assumes keepalived and haproxy are not yet installed on the target hosts.
k8s-ha-install.yml
vip:    # change to your own VIP
# install haproxy and keepalived
ansible-playbook -i hosts.ini k8s-ha-install.yml
k8s-ha-install.yml
- hosts: k8s_ha
  gather_facts: yes
  become: yes
  vars:
    auth_pass: HA_AUTH
    vip: "192.168.200.200"
  tasks:
    - name: Check if haproxy and keepalived are installed
      command: rpm -q haproxy keepalived
      register: rpm_installed
      failed_when: rpm_installed.rc not in [0, 2]
      changed_when: rpm_installed.rc == 2

    - name: Copy RPM packages to remote server
      copy:
        src: "{{ item }}"
        dest: "/tmp/"
        owner: root
        group: root
        mode: '0644'
      with_items:
        - "{{ playbook_dir }}/keepalived-2.0.10-10.el8.1.x86_64.rpm"
        - "{{ playbook_dir }}/haproxy-1.8.23-3.el8.x86_64.rpm"
      when: rpm_installed.rc == 2

    - name: Install RPM packages
      command: rpm -i keepalived-2.0.10-10.el8.1.x86_64.rpm haproxy-1.8.23-3.el8.x86_64.rpm
      args:
        chdir: /tmp/
      when: rpm_installed.rc == 2

    # configure haproxy (haproxy.cfg.j2 is in the tree but these two tasks were
    # missing from the original listing; without them the VIP health check fails)
    - name: Configure HAProxy
      template:
        src: "{{ playbook_dir }}/haproxy.cfg.j2"
        dest: /etc/haproxy/haproxy.cfg
      notify: restart haproxy

    - name: Start HAProxy
      service:
        name: haproxy
        state: started
        enabled: yes

    # configure keepalived
    - name: Copy check_apiserver.sh
      copy:
        src: "{{ playbook_dir }}/check_apiserver.sh"
        dest: /etc/keepalived/
        owner: root
        group: root
        mode: '0755'   # must be executable: keepalived runs it as a vrrp_script

    - name: Configure Keepalived
      template:
        src: "{{ playbook_dir }}/keepalived.conf.j2"
        dest: /etc/keepalived/keepalived.conf
      vars:
        state: "{{ 'MASTER' if hostvars[inventory_hostname].role == 'master' else 'BACKUP' }}"
        priority: "{{ 100 if hostvars[inventory_hostname].role == 'master' else 50 if hostvars[inventory_hostname].role == 'backup1' else 40 }}"
      notify: restart keepalived

    - name: Start Keepalived
      service:
        name: keepalived
        state: started
        enabled: yes
  handlers:
    - name: restart haproxy
      service:
        name: haproxy
        state: restarted
    - name: restart keepalived
      service:
        name: keepalived
        state: restarted
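Once keepalived is running, the VIP should be bound on the master node only (NIC name per keepalived.conf.j2, ens160 here):
# run on the master: the VIP should appear; on backups it should not
ip addr show ens160 | grep 192.168.200.200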
check_apiserver.sh
#!/bin/bash
err=0
for k in $(seq 1 3); do
    check_code=$(pgrep haproxy)
    if [[ $check_code == "" ]]; then
        err=$(expr $err + 1)
        sleep 1
        continue
    else
        err=0
        break
    fi
done

if [[ $err != "0" ]]; then
    echo "systemctl stop keepalived"
    /usr/bin/systemctl stop keepalived
    exit 1
else
    exit 0
fi
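The script can be exercised by hand before trusting it to keepalived. Be aware that with haproxy down it really will stop keepalived on that node:
# with haproxy running this exits 0; with haproxy stopped it stops keepalived and exits 1
bash /etc/keepalived/check_apiserver.sh; echo "exit=$?"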
haproxy.cfg.j2
global
    maxconn     2000
    ulimit-n    16384
    log         127.0.0.1 local0 err
    stats timeout 30s

defaults
    log     global
    mode    http
    option  httplog
    timeout connect 5000
    timeout client  50000
    timeout server  50000
    timeout http-request 15s
    timeout http-keep-alive 15s

frontend monitor-in
    bind *:33305
    mode http
    option httplog
    monitor-uri /monitor

frontend k8s-master
    bind 0.0.0.0:16443
    bind 127.0.0.1:16443
    mode tcp
    option tcplog
    tcp-request inspect-delay 5s
    default_backend k8s-master

backend k8s-master
    mode tcp
    option tcplog
    option tcp-check
    balance roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    # backends must point at kube-apiserver (6443), not the haproxy frontend port 16443
{% for host in groups['k8s_ha'] %}
    server {{ hostvars[host]['ansible_hostname'] }} {{ hostvars[host]['ansible_host'] }}:6443 check
{% endfor %}
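After templating, the rendered config and the monitor frontend can be checked with standard haproxy tooling:
# syntax-check the rendered config, then probe the monitor URI
haproxy -c -f /etc/haproxy/haproxy.cfg
curl -s http://127.0.0.1:33305/monitor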
hosts.ini
[k8s_ha]
192.168.200.155 ansible_host=192.168.200.155 ansible_user=root ansible_ssh_pass=123456 role=master
192.168.200.156 ansible_host=192.168.200.156 ansible_user=root ansible_ssh_pass=123456 role=backup
192.168.200.157 ansible_host=192.168.200.157 ansible_user=root ansible_ssh_pass=123456 role=backup1
# role is consumed by the playbook and templates; set master/backup/backup1 to match your topology
keepalived.conf.j2
! Configuration File for Keepalived
global_defs {
    router_id LVS_DEVEL
    script_user root
    enable_script_security
}

vrrp_script chk_apiserver {
    script "/etc/keepalived/check_apiserver.sh"
    interval 5
    weight -5
    fall 2
    rise 1
}

vrrp_instance VI_1 {
    state {{ 'MASTER' if hostvars[inventory_hostname].role == 'master' else 'BACKUP' }}
    interface ens160    # adjust to your NIC name
    mcast_src_ip {{ ansible_default_ipv4.address }}
    virtual_router_id 51
    priority {{ 100 if hostvars[inventory_hostname].role == 'master' else 50 if hostvars[inventory_hostname].role == 'backup1' else 40 }}
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass {{ auth_pass }}
    }
    track_script {
        chk_apiserver
    }
    virtual_ipaddress {
        {{ vip }}
    }
}
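A simple failover test under the settings above (VIP 192.168.200.200, NIC ens160): stop haproxy on the master and watch the VIP move to a backup.
# on the master: chk_apiserver fails, keepalived stops, a backup takes the VIP
systemctl stop haproxy
# on a backup node:
ip addr show ens160 | grep 192.168.200.200
# restore the master afterwards
systemctl start haproxy && systemctl start keepalived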
etcd setup (binary)
├── ca-config.json
├── ca-csr.json
├── client.json
├── etcd-install.yml
├── etcd.json
├── etcd.service.j2
├── hosts.ini
└── tool                # etcd certificate tooling and binaries
    ├── cfssl
    ├── cfssljson
    ├── etcd
    └── etcdctl
# edit the IP addresses in etcd.json for your environment
ansible-playbook -i hosts.ini etcd-install.yml
ca-config.json
{
    "signing": {
        "default": {
            "expiry": "876000h"
        },
        "profiles": {
            "server": {
                "expiry": "876000h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            },
            "client": {
                "expiry": "876000h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "client auth"
                ]
            },
            "peer": {
                "expiry": "876000h",
                "usages": [
                    "signing",
                    "key encipherment",
                    "server auth",
                    "client auth"
                ]
            }
        }
    }
}
ca-csr.json
{
    "CN": "etcd",
    "key": {
        "algo": "rsa",
        "size": 2048
    }
}
client.json
{
    "CN": "client",
    "key": {
        "algo": "ecdsa",
        "size": 256
    }
}
etcd.json (edit the IPs for your environment)
{
    "CN": "etcd",
    "hosts": [
        "192.168.200.155",
        "192.168.200.156",
        "192.168.200.157"
    ],
    "key": {
        "algo": "ecdsa",
        "size": 256
    },
    "names": [
        {
            "C": "CN",
            "L": "BJ",
            "ST": "BJ"
        }
    ]
}
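For reference, these JSON files drive cfssl the same way the playbook does; the equivalent manual run (from the directory holding the JSON files, with cfssl/cfssljson on PATH) looks like:
# CA first, then server/peer/client certs signed by it
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server etcd.json | cfssljson -bare server
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer etcd.json | cfssljson -bare peer
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client.json | cfssljson -bare client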
etcd.service.j2
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos

[Service]
Type=notify
WorkingDirectory={{ etcd_data_dir }}
ExecStart={{ etcd_binary_path }}/etcd --name={{ inventory_hostname }} \
  --cert-file={{ etcd_tls_path }}/server.pem --key-file={{ etcd_tls_path }}/server-key.pem \
  --peer-cert-file={{ etcd_tls_path }}/peer.pem --peer-key-file={{ etcd_tls_path }}/peer-key.pem \
  --trusted-ca-file={{ etcd_tls_path }}/ca.pem --peer-trusted-ca-file={{ etcd_tls_path }}/ca.pem \
  --initial-advertise-peer-urls=https://{{ ansible_host }}:2380 --listen-peer-urls=https://{{ ansible_host }}:2380 \
  --listen-client-urls=https://{{ ansible_host }}:2379 --advertise-client-urls=https://{{ ansible_host }}:2379 \
  --initial-cluster-token=etcd-cluster-0 \
  --initial-cluster=etcd1=https://{{ etcd1 }}:2380,etcd2=https://{{ etcd2 }}:2380,etcd3=https://{{ etcd3 }}:2380 \
  --initial-cluster-state=new --data-dir={{ etcd_data_dir }} \
  --snapshot-count=50000 --auto-compaction-retention=1 \
  --max-request-bytes=10485760 --quota-backend-bytes=8589934592
Restart=always
RestartSec=15
LimitNOFILE=65536
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target
etcd-install.yml
- hosts: etcd
  remote_user: root
  vars:
    etcd_data_dir: "/data/etcd/"
    etcd_binary_path: "/data/etcd/bin"
    etcd_tls_path: "/data/etcd/ssl"
    etcd_version: "3.4.3"
    # must match the addresses in hosts.ini
    etcd1: "192.168.200.155"
    etcd2: "192.168.200.156"
    etcd3: "192.168.200.157"
    cert_types:
      - { name: "client", config: "ca-config.json", profile: "client", csr_file: "client.json" }
      - { name: "server", config: "ca-config.json", profile: "server", csr_file: "etcd.json" }
      - { name: "peer", config: "ca-config.json", profile: "peer", csr_file: "etcd.json" }
  tasks:
    - name: Ensure necessary directories exist
      file:
        path: "{{ item }}"
        state: directory
        mode: 0755
      loop:
        - "{{ etcd_data_dir }}"
        - "{{ etcd_binary_path }}"
        - "{{ etcd_tls_path }}"

    - name: Copy tools
      copy:
        src: "{{ item.src }}"
        dest: "{{ item.dest }}"
        owner: root
        group: root
        mode: '0755'
      loop:
        - { src: "{{ playbook_dir }}/tool/cfssl", dest: /usr/bin/cfssl }
        - { src: "{{ playbook_dir }}/tool/cfssljson", dest: /usr/bin/cfssljson }
        - { src: "{{ playbook_dir }}/tool/etcd", dest: "{{ etcd_binary_path }}/etcd" }
        - { src: "{{ playbook_dir }}/tool/etcdctl", dest: "{{ etcd_binary_path }}/etcdctl" }

    # The two cert-generation tasks are delegated to etcd1 and read the CSR JSON
    # files via playbook_dir, so they assume the playbook is run from etcd1 itself
    # (as does the synchronize task below, which pushes from the control machine).
    - name: Initialize CA certificate and key
      shell: >-
        cfssl gencert -initca "{{ playbook_dir }}/ca-csr.json" |
        cfssljson -bare "{{ etcd_tls_path }}/ca"
      delegate_to: "{{ etcd1 }}"
      run_once: true

    - name: Generate certificates and keys
      shell: >-
        cfssl gencert -ca="{{ etcd_tls_path }}/ca.pem"
        -ca-key="{{ etcd_tls_path }}/ca-key.pem"
        -config="{{ playbook_dir }}/{{ item.config }}"
        -profile="{{ item.profile }}"
        "{{ playbook_dir }}/{{ item.csr_file }}" |
        cfssljson -bare "{{ etcd_tls_path }}/{{ item.name }}"
      loop: "{{ cert_types }}"
      delegate_to: "{{ etcd1 }}"
      run_once: true

    - name: Synchronize certificate directory
      synchronize:
        src: /data/etcd/ssl/
        dest: /data/etcd/ssl/
        delete: yes
        compress: yes
        recursive: yes

    - name: Create etcd systemd service file
      template:
        src: "{{ playbook_dir }}/etcd.service.j2"
        dest: /usr/lib/systemd/system/etcd.service
        owner: root
        group: root
        mode: '0644'
      notify:
        - reload etcd
        - enable and start etcd
  handlers:
    - name: reload etcd
      systemd:
        daemon_reload: yes
    - name: enable and start etcd
      systemd:
        name: etcd
        state: started
        enabled: yes
hosts.ini
[etcd]
etcd1 ansible_host=192.168.200.155 ansible_user=root ansible_ssh_pass=123456
etcd2 ansible_host=192.168.200.156 ansible_user=root ansible_ssh_pass=123456
etcd3 ansible_host=192.168.200.157 ansible_user=root ansible_ssh_pass=123456
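With all three members up, cluster health can be verified with the bundled etcdctl (paths and IPs per the vars above):
# query member health over TLS using the client certificate
/data/etcd/bin/etcdctl \
  --endpoints=https://192.168.200.155:2379,https://192.168.200.156:2379,https://192.168.200.157:2379 \
  --cacert=/data/etcd/ssl/ca.pem \
  --cert=/data/etcd/ssl/client.pem \
  --key=/data/etcd/ssl/client-key.pem \
  endpoint health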
k8s installation
k8s-install/
├── daemon.json           # docker daemon config
├── docker-19.03.14       # docker RPM packages
├── hosts.ini
├── images                # container images (tar files)
├── k8s-1.19.1            # k8s RPM packages
├── k8s-install.yml
├── kubeadm-init.yaml.j2  # kubeadm init template
└── test.yml              # test playbook, optional
daemon.json
{
    "exec-opts": ["native.cgroupdriver=systemd"],
    "log-driver": "json-file",
    "log-opts": {
        "max-size": "100m",
        "max-file": "5"
    },
    "storage-driver": "overlay2",
    "storage-opts": [
        "overlay2.override_kernel_check=true"
    ],
    "data-root": "/data/docker",
    "live-restore": true,
    "debug": false,
    "experimental": false
}
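After docker starts with this file in place, confirm the cgroup driver and data root took effect (kubelet expects the systemd driver):
# verify the daemon picked up daemon.json
docker info | grep -E 'Cgroup Driver|Docker Root Dir'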
hosts.ini
[k8s]
k8s-1 ansible_host=192.168.200.155 ansible_user=root ansible_ssh_pass=123456
k8s-2 ansible_host=192.168.200.156 ansible_user=root ansible_ssh_pass=123456
k8s-3 ansible_host=192.168.200.157 ansible_user=root ansible_ssh_pass=123456
k8s-install.yml
- hosts: k8s
  remote_user: root
  vars:
    rsync_tasks:
      - src: "{{ playbook_dir }}/images"
        dest: /tmp/
      - src: "{{ playbook_dir }}/docker-19.03.14"
        dest: /tmp/
      - src: "{{ playbook_dir }}/k8s-1.19.1"
        dest: /tmp/
      - src: "{{ playbook_dir }}/daemon.json"
        dest: /etc/docker/
    init_ip: 192.168.200.156
  tasks:
    - name: Synchronize directories to remote host
      synchronize:
        src: "{{ item.src }}"
        dest: "{{ item.dest }}"
        mode: push
        compress: yes
      loop: "{{ rsync_tasks }}"

    - name: Check if Docker and Kubernetes are installed
      command: rpm -q kubelet kubeadm kubectl docker-ce
      register: rpm_installed
      ignore_errors: yes

    - name: Install Docker and K8s RPMs if not installed
      shell: |
        rpm -Uvh /tmp/docker-19.03.14/*.rpm --nodeps --force &&
        rpm -Uvh /tmp/k8s-1.19.1/*.rpm --nodeps --force
      when: rpm_installed.rc == 4

    - name: Reload systemd manager configuration
      ansible.builtin.systemd:
        daemon_reload: yes

    - name: Start and enable Docker service
      ansible.builtin.systemd:
        name: docker
        state: started
        enabled: yes

    - name: Enable and start kubelet service
      ansible.builtin.systemd:
        name: kubelet
        enabled: yes
        state: started

    - name: Check if there are any Docker images on the host
      shell: docker images -q --all
      register: image_check
      changed_when: false

    - name: Find Docker image tar files
      find:
        paths: /tmp/images/
        patterns: "*.tar"
        file_type: file
      register: tar_files

    - name: Load all Docker images if none are present
      command: docker load -i "{{ item }}"
      loop: "{{ tar_files.files | map(attribute='path') | list }}"
      when: image_check.stdout == ""
      become: yes

    - name: Create PKI directories
      file:
        path: "{{ item }}"
        state: directory
        owner: root
        group: root
        mode: '0755'
      with_items:
        - /etc/kubernetes/pki
        - /etc/kubernetes/pki/etcd

    - name: Copy etcd certificates
      copy:
        src: "{{ item.src }}"
        dest: "{{ item.dest }}"
        owner: root
        group: root
        mode: '0644'
      loop:
        - { src: '/data/etcd/ssl/ca.pem', dest: '/etc/kubernetes/pki/etcd/ca.pem' }
        - { src: '/data/etcd/ssl/client.pem', dest: '/etc/kubernetes/pki/apiserver-etcd-client.pem' }
        - { src: '/data/etcd/ssl/client-key.pem', dest: '/etc/kubernetes/pki/apiserver-etcd-client-key.pem' }
      delegate_to: "{{ init_ip }}"
      run_once: true

    - name: Render the kubeadm-init template with variables
      template:
        src: "{{ playbook_dir }}/kubeadm-init.yaml.j2"
        dest: /etc/kubernetes/kubeadm-init.yaml
      delegate_to: "{{ init_ip }}"
      run_once: true
      vars:
        kubernetes_address: "192.168.200.156"
        kubernetes_vip: "192.168.200.200"
        kubernetes_version: "v1.19.1"
        kubernetes_name: "k8s-2"
        etcd1: "192.168.200.155"
        etcd2: "192.168.200.156"
        etcd3: "192.168.200.157"
        pod_subnet: "10.244.0.0/16"
        service_subnet: "10.96.0.0/12"
        image_repository: "k8s.gcr.io"

    - name: Initialize the Kubernetes cluster using kubeadm
      command: kubeadm init --config /etc/kubernetes/kubeadm-init.yaml --upload-certs
      delegate_to: "{{ init_ip }}"
      run_once: true
      register: kubeadm_init

    - name: Setup kubeconfig for current user in one shell task
      shell: |
        mkdir -p $HOME/.kube;
        cp -i /etc/kubernetes/admin.conf $HOME/.kube/config;
        chown $(id -u):$(id -g) $HOME/.kube/config
      become: yes
      become_user: "{{ ansible_user }}"
      delegate_to: "{{ init_ip }}"
      run_once: true
      when: kubeadm_init.rc == 0
kubeadm-init.yaml.j2
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: {{ kubernetes_address }}   # this node's IP
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: {{ kubernetes_name }}
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
#  local:
#    dataDir: /var/lib/etcd
  external:
    endpoints:
    - https://{{ etcd1 }}:2379
    - https://{{ etcd2 }}:2379
    - https://{{ etcd3 }}:2379
    caFile: /etc/kubernetes/pki/etcd/ca.pem                      # CA generated when building the etcd cluster
    certFile: /etc/kubernetes/pki/apiserver-etcd-client.pem      # client cert generated when building the etcd cluster
    keyFile: /etc/kubernetes/pki/apiserver-etcd-client-key.pem   # client key generated when building the etcd cluster
imageRepository: {{ image_repository }}       # images must be pre-loaded
kind: ClusterConfiguration
kubernetesVersion: {{ kubernetes_version }}   # must match the installed k8s version
controlPlaneEndpoint: {{ kubernetes_vip }}:16443
networking:
  dnsDomain: cluster.local
  podSubnet: {{ pod_subnet }}
  serviceSubnet: {{ service_subnet }}
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
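Before the real initialization, the rendered file can be sanity-checked on the init node; kubeadm supports a dry run that validates the config without changing the host:
# validate the rendered config (no changes are made)
kubeadm init --config /etc/kubernetes/kubeadm-init.yaml --dry-run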
Procedure
1. Install ansible
2. Deploy pre-linux
3. Deploy k8s-ha
4. Deploy etcd
5. Deploy k8s-install
Deployment walkthrough
# create a working directory and unpack the archive
unzip k8s-1.19.1.zip && cd k8s-1.19.1
# install the ansible tooling
sh ansible-install.sh
# deploy the pre-linux prerequisite environment
cd pre-linux
vim hosts.ini
[pre_linux]
192.168.200.158 ansible_host=192.168.200.158 ansible_user=root ansible_ssh_pass=123456
192.168.200.159 ansible_host=192.168.200.159 ansible_user=root ansible_ssh_pass=123456
192.168.200.160 ansible_host=192.168.200.160 ansible_user=root ansible_ssh_pass=123456
ansible-playbook -i hosts.ini pre-linux-init.yml
# deploy the k8s-ha high-availability components
cd k8s-ha
# edit the configuration
vi k8s-ha-install.yml
vip: "192.168.200.200"
vi hosts.ini
[k8s_ha]
192.168.200.158 ansible_host=192.168.200.158 ansible_user=root ansible_ssh_pass=123456 role=master
192.168.200.159 ansible_host=192.168.200.159 ansible_user=root ansible_ssh_pass=123456 role=backup
192.168.200.160 ansible_host=192.168.200.160 ansible_user=root ansible_ssh_pass=123456 role=backup1
# deploy
ansible-playbook -i hosts.ini k8s-ha-install.yml
# deploy etcd
cd etcd
# edit the IP addresses
vi etcd.json
etcd.json
{
    "CN": "etcd",
    "hosts": [
        "192.168.200.158",
        "192.168.200.159",
        "192.168.200.160"
# edit the inventory
vi hosts.ini
[etcd]
etcd1 ansible_host=192.168.200.158 ansible_user=root ansible_ssh_pass=123456
etcd2 ansible_host=192.168.200.159 ansible_user=root ansible_ssh_pass=123456
etcd3 ansible_host=192.168.200.160 ansible_user=root ansible_ssh_pass=123456
vi etcd-install.yml
etcd1: "192.168.200.158"
etcd2: "192.168.200.159"
etcd3: "192.168.200.160"
# deploy
ansible-playbook -i hosts.ini etcd-install.yml
# deploy k8s
cd k8s-install
vim hosts.ini
[k8s]
k8s-158 ansible_host=192.168.200.158 ansible_user=root ansible_ssh_pass=123456
k8s-159 ansible_host=192.168.200.159 ansible_user=root ansible_ssh_pass=123456
k8s-160 ansible_host=192.168.200.160 ansible_user=root ansible_ssh_pass=123456
vi k8s-install.yml
init_ip: 192.168.200.158        # node that runs kubeadm init; update this too
vars:
  kubernetes_address: "192.168.200.158"
  kubernetes_vip: "192.168.200.200"
  kubernetes_version: "v1.19.1"
  kubernetes_name: "k8s-158"
  etcd1: "192.168.200.158"
  etcd2: "192.168.200.159"
  etcd3: "192.168.200.160"
  pod_subnet: "10.244.0.0/16"
  service_subnet: "10.96.0.0/12"
  image_repository: "k8s.gcr.io"
# deploy
ansible-playbook -i hosts.ini k8s-install.yml
Adding the remaining masters to the cluster
kubeadm init phase upload-certs --upload-certs --config /etc/kubernetes/kubeadm-init.yaml
kubeadm token create --print-join-command
# combine the two outputs into a single control-plane join command
kubeadm join 192.168.200.200:16443 --token 1j8cfm.p24w6o38ric4tzi7 --discovery-token-ca-cert-hash sha256:f8d5157d859435a95c243c90cae9fed685056762037018492035c95f5d6fff2f --control-plane --certificate-key a4cf06f277525ea4bec5a56f8f0183c056cfa2cd0691d23e84c3b0a844c92657
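After the remaining masters join, verify from any control-plane node. Nodes stay NotReady until a CNI plugin matching podSubnet (10.244.0.0/16 here) is installed:
# all control-plane nodes should register; system pods should come up
kubectl get nodes -o wide
kubectl get pods -n kube-system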
Download link for the installation packages and scripts
Link: https://caiyun.139.com/m/i?145CGc1Difyce
Access code: Kx0B