参考传送门
准备工作
配置好免密登录
ansible hosts文件
修改ansible配置文件
playbook里面有部分信息是要自己修改的,请勿直接运行
本剧本假设每台主机有两块磁盘, 第二块(例如 sdb)专门用于 Docker 存储; 请在变量 disk_name 处填写该盘符
---
# Play 1 — runs on every node.
# Disables selinux and swap, installs docker-ce backed by a devicemapper
# thin-pool carved from a dedicated disk, then installs kubelet/kubeadm/kubectl.
- hosts: all  # all nodes
  remote_user: root
  vars:
    disk_name: sdb        # device carved into the docker volume group (second disk)
    vgdocker: vgdocker    # volume group name, customize as needed
    lv_size: 95           # thinpool size as a percentage of the VG
  tasks:
    - name: 永久关闭 selinux
      lineinfile:
        dest: /etc/selinux/config
        regexp: "^SELINUX="
        line: "SELINUX=disabled"

    - name: 临时关闭 selinux
      shell: "setenforce 0"
      # setenforce exits non-zero when selinux is already disabled; tolerate it
      failed_when: false

    - name: 关闭swap
      shell: "swapoff -a && sed -i '/swap/s/^\\(.*\\)$/#\\1/g' /etc/fstab"

    - name: install docker prerequisites
      # one yum task with a list instead of three separate tasks
      yum:
        name:
          - yum-utils
          - device-mapper-persistent-data
          - lvm2
        state: present

    - name: add docker-ce.repo
      shell: yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

    - name: install docker
      shell: yum install docker-ce-18.06.3.ce-3.el7 -y

    - name: 检查pv是否存在
      shell: pvscan | grep {{ disk_name }}
      register: pv_check
      failed_when: false

    - name: 创建pv
      shell: pvcreate /dev/{{ disk_name }}
      # grep exits 1 when the pv was not found
      when: pv_check.rc == 1

    - name: 检查vg是否存在
      shell: vgscan | grep {{ vgdocker }}
      register: vg_check
      failed_when: false

    - name: 创建vgdocker
      shell: vgcreate {{ vgdocker }} /dev/{{ disk_name }}
      when: vg_check.rc == 1

    - name: 检查lv thinpool是否存在
      shell: lvscan | grep thinpool
      register: thinpool_check
      failed_when: false

    # - name: 查看注册变量
    #   debug: msg="{{ thinpool_check }}"

    - name: 创建lv thinpool
      shell: lvcreate --wipesignatures y -n thinpool -l {{ lv_size }}%VG {{ vgdocker }}
      when: thinpool_check.rc == 1

    - name: 检查lv thinpoolmeta是否存在
      shell: lvscan | grep thinpoolmeta
      register: thinpoolmeta_check
      failed_when: false

    # - name: 查看注册变量
    #   debug: msg="{{ thinpoolmeta_check }}"

    - name: 创建thinpoolmeta
      shell: lvcreate --wipesignatures y -n thinpoolmeta -l 1%VG {{ vgdocker }}
      when: thinpoolmeta_check.rc == 1

    - name: 检查thinpool_tdata是否存在
      shell: lvs -a | grep thinpool_tdata
      register: thinpool_tdata
      failed_when: false

    - name: lvconvert
      # converts the thinpool lv into an actual thin pool using thinpoolmeta
      shell: lvconvert -y --zero n -c 512K --thinpool vgdocker/thinpool --poolmetadata vgdocker/thinpoolmeta
      when: thinpool_tdata.rc == 1

    - name: 自动增长
      # lvm profile: auto-extend the thin pool by 20% once it is 80% full
      shell:
        cmd: |
          cat >/etc/lvm/profile/docker-thinpool.profile<<EOF
          activation {
          thin_pool_autoextend_threshold=80
          thin_pool_autoextend_percent=20
          }
          EOF

    - name: lvchange
      shell: lvchange --metadataprofile docker-thinpool vgdocker/thinpool

    - name: 在etc下创建docker目录
      # the file module is idempotent, so no failed_when override is needed;
      # mode is quoted with a leading zero to avoid the octal trap
      file:
        path: /etc/docker
        state: directory
        owner: root
        group: root
        mode: "0700"

    - name: 修改docker daemon.json
      shell:
        cmd: |
          cat >/etc/docker/daemon.json<<EOF
          {
          "registry-mirrors": ["换成自己加速器的地址"],
          "exec-opts": ["native.cgroupdriver=systemd"],
          "log-driver": "json-file",
          "log-opts": {
          "max-size": "100m"
          },
          "storage-driver": "devicemapper",
          "storage-opts": [ "dm.thinpooldev=/dev/mapper/vgdocker-thinpool",
          "dm.use_deferred_removal=true",
          "dm.use_deferred_deletion=true",
          "dm.basesize=10G"
          ]
          }
          EOF

    - name: 配置kubernetes.repo
      shell:
        cmd: |
          cat >/etc/yum.repos.d/kubernetes.repo <<EOF
          [kubernetes]
          name=Kubernetes
          baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
          enabled=1
          gpgcheck=0
          repo_gpgcheck=0
          gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
          exclude=kube*
          EOF

    - name: install kubelet kubeadm kubectl
      # --disableexcludes is required because the repo sets exclude=kube*
      shell: yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes

    - name: 启动kubelet并开机启动
      service:
        name: kubelet
        state: restarted
        enabled: true

    - name: configure kernel params for kubernetes
      shell:
        cmd: |
          cat <<EOF > /etc/sysctl.d/k8s.conf
          net.bridge.bridge-nf-call-ip6tables = 1
          net.bridge.bridge-nf-call-iptables = 1
          net.ipv4.ip_forward = 1
          EOF
          sysctl --system
# Play 2 — master node only.
# Restarts docker (picking up the new daemon.json) and, via handler,
# pre-pulls the kubernetes images with the script created below.
- hosts: master
  remote_user: root
  tasks:
    - name: restart docker
      # block YAML instead of key=value args; canonical true/false booleans
      service:
        name: docker
        state: restarted
        enabled: true
      notify: pull images
  handlers:
    - name: pull images
      script: /opt/script/docker/pull_images.sh
在 master 节点上创建镜像拉取脚本:
mkdir -p /opt/script/docker
vim /opt/script/docker/pull_images.sh
#!/bin/bash
# Pull the kubernetes control-plane images from the aliyun mirror and
# re-tag them as k8s.gcr.io/* so that kubeadm finds them locally.
BASE_URL="registry.aliyuncs.com/google_containers/"
CHANGE_URL="k8s.gcr.io/"
KUBE_VERSION="v1.18.0"
COREDNS_VERSION="1.6.7"
ETCD_VERSION="3.4.3-0"
PAUSE_VERSION="3.2"
# NOTE(review): the flannel variables below are defined but never used by
# this script — kept for a future flannel pull step; confirm before relying on them.
FLANEL_BASE_URL="quay-mirror.qiniu.com/coreos/"
FLANNEL_VERSION="v0.11.0-amd64"

# pull_and_retag IMAGE VERSION
# Pulls BASE_URL/IMAGE:VERSION, re-tags it as CHANGE_URL/IMAGE:VERSION and
# removes the mirror tag. Tagging by full name (instead of grepping
# `docker images` for a substring to extract an id) avoids accidentally
# matching unrelated images and feeding multiple ids to `docker tag`.
pull_and_retag() {
  local image="$1" version="$2"
  docker pull "${BASE_URL}${image}:${version}"
  docker tag "${BASE_URL}${image}:${version}" "${CHANGE_URL}${image}:${version}"
  docker rmi "${BASE_URL}${image}:${version}"
}

for component in kube-apiserver kube-controller-manager kube-proxy kube-scheduler; do
  pull_and_retag "$component" "$KUBE_VERSION"
done

pull_and_retag "coredns" "$COREDNS_VERSION"
pull_and_retag "etcd" "$ETCD_VERSION"
pull_and_retag "pause" "$PAUSE_VERSION"