Kubernetes
K8s用途
帮助人们管理Docker容器,对Docker及容器进行更高级更灵活的管理 K8s是容器集群管理系统,是一个开源的平台,可以实现容器集群的自动化部署、自动扩缩容、维护等
K8s架构
Master (管理节点)
Master提供集群的控制 对集群进行全局决策 检测和响应集群事件 Master主要由Apiserver,Kubeproxy,Scheduler,Controllermanager和Etcd服务组成
Apiserver
是整个系统的对外接口,供客户端的其他组件调用 后端元数据存储于etcd中(键值数据库) Scheduler
Controller
Etcd
CoreOS团队开源项目,高可用的分布式键值数据库,基于Go语言 K8s在运行过程中产生的元数据全部存储在etcd中 在键的组织上etcd采用层次化的空间结构(类似文件系统中的目录的概念)用户指定的键可以为单独的名字 也可以指定目录结构 /dir/ex/keyname etcd有K8s集群自动管理,用户无需手动干预 etcdctl是etcd的客户端管理程序 Node(计算节点) Image (镜像仓库)
Master服务端口
协议 端口范围 软件 用途 TCP 6443 kube-apiserver 所有组件接口服务 TCP 2379-2380 etcd kube-api,etcd服务 TCP 10250 kubelet kubelet服务 TCP 10251 kube-scheduler kube-scheduler服务 TCP 10252 kube-controller-manager kube-controller-manager服务
K8s适用场景
有大量跨主机的容器需要管理 快速部署应用 快速扩展应用 无缝对接新的应用功能 节省资源,优化硬件资源的使用
Kubernetes搭建使用
机器列表
主机名 IP地址 角色 说明 master 192.168.1.21 kube-master 管理节点 node-0001 192.168.1.31 kube-node 计算节点 node-0002 192.168.1.32 kube-node 计算节点 node-0003 192.168.1.33 kube-node 计算节点 registry 192.168.1.100 registry 私有镜像仓库 proxy 192.168.1.252 proxy 跳板机
跳板机配置yum仓库
# Copy the offline K8s RPM package set into the FTP-served local repository.
cp -a k8s-install /var/ftp/localrepo
cd /var/ftp/localrepo
# Rebuild the repo metadata so yum clients pick up the newly added packages.
createrepo --update .
master安装工具软件包
# Refresh the yum metadata cache, then install the control-plane tooling
# (kubeadm/kubelet/kubectl) and the Docker engine.
yum makecache
yum install -y kubeadm kubelet kubectl docker-ce
# Configure the Docker daemon before first start:
#  - cgroupdriver=systemd must match the kubelet's cgroup driver
#  - registry-mirrors speeds up pulls from Docker Hub
#  - insecure-registries allows plain-HTTP access to the private registry
mkdir -p /etc/docker
vim /etc/docker/daemon.json
== == == == == == == == == == == == == == == =
{
"exec-opts" : [ "native.cgroupdriver=systemd" ] ,
"registry-mirrors" : [ "https://hub-mirror.c.163.com" ] ,
"insecure-registries" :[ "192.168.1.100:5000" , "registry:5000" ]
}
== == == == == == == == == == == == == == == == == == == =
# Start docker and kubelet now and enable them at boot.
systemctl enable --now docker kubelet
# Verify the cgroup driver took effect (must report "systemd").
docker info | grep Cgroup
Cgroup Driver: systemd
# Kernel parameters required by kube-proxy/flannel: let bridged traffic
# traverse iptables, and enable IPv4 forwarding.
vim /etc/sysctl.d/k8s.conf
== == == == == == == == == == == == == == == ==
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
== == == == == == == == == == == == == == == == == == ==
# br_netfilter must be loaded or the bridge-nf-call sysctls do not exist.
modprobe br_netfilter
sysctl --system
镜像导入私有仓库
# Load every saved image tarball in the current directory into the
# local Docker daemon.
for i in *.tar.gz; do
  docker load -i "${i}"
done
# Re-tag each loaded image for the private registry (192.168.1.100:5000),
# stripping any upstream registry/namespace prefix (e.g. k8s.gcr.io/foo ->
# foo, matching the flat names seen in the registry catalog), push it,
# then delete the original local tag to free space.
# awk skips the header row by filtering out the literal "TAG" column value.
docker images | awk '$2 != "TAG" {print $1, $2}' | while read -r _f _v; do
  docker tag "${_f}:${_v}" "192.168.1.100:5000/${_f##*/}:${_v}"
  docker push "192.168.1.100:5000/${_f##*/}:${_v}"
  docker rmi "${_f}:${_v}"
done
curl http://192.168.1.100:5000/v2/_catalog
{ "repositories" :[ "centos" ,"coredns" ,"etcd" ,"kube-apiserver" ,"kube-controller-manager" ,"kube-proxy" ,"kube-scheduler" ,"myos" ,"nginx" ,"pause" ,"redis" ,"ubuntu" ] }
仓库初始化
ls
myos.tar.gz init-img.sh
cat init-img.sh
== == == == == == == == == == == == == == ==
# Install Docker on the registry host.
yum install -y docker-ce
mkdir -p /etc/docker
# Daemon config: systemd cgroup driver, Hub mirror, and allow plain-HTTP
# access to the private registry (by IP and by hostname).
cat > /etc/docker/daemon.json << 'EOF'
{
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors": ["https://hub-mirror.c.163.com"],
"insecure-registries":["192.168.1.100:5000", "registry:5000"]
}
EOF
systemctl enable --now docker.service
# Restart so the daemon.json written above takes effect.
systemctl restart docker.service
# Import the base OS image used as the parent of all custom images.
docker load -i myos.tar.gz
# Build image 1/3: Apache httpd in the foreground, serving /var/www/html.
cat > Dockerfile<< 'EOF'
FROM myos:latest
ENV LANG=C
WORKDIR /var/www/html/
EXPOSE 80
CMD ["/usr/sbin/httpd", "-DFOREGROUND"]
EOF
docker build -t 192.168.1.100:5000/myos:httpd .
# Build image 2/3: php-fpm on port 9000 (Dockerfile is overwritten in place).
cat > Dockerfile<< 'EOF'
FROM myos:latest
EXPOSE 9000
WORKDIR /usr/local/nginx/html
CMD ["/usr/sbin/php-fpm", "--nodaemonize"]
EOF
docker build -t 192.168.1.100:5000/myos:php-fpm .
# Build image 3/3: nginx in the foreground ("daemon off;").
cat > Dockerfile<< 'EOF'
FROM myos:latest
EXPOSE 80
WORKDIR /usr/local/nginx/html
CMD ["/usr/local/nginx/sbin/nginx", "-g", "daemon off;"]
EOF
docker build -t 192.168.1.100:5000/myos:nginx .
rm -f Dockerfile
# Also publish the untouched base image under tag v1804.
docker tag myos:latest 192.168.1.100:5000/myos:v1804
# Push all four tags to the private registry.
for i in v1804 httpd php-fpm nginx; do
docker push 192.168.1.100:5000/myos:${i}
done
== == == == == == == == == == == == == == == == == == =
./init-img.sh
curl http://192.168.1.100:5000/v2/myos/tags/list
Tab键帮助设置
kubectl completion bash > /etc/bash_completion.d/kubectl
kubeadm completion bash > /etc/bash_completion.d/kubeadm
exit
安装IPVS代理软件包
yum install -y ipvsadm ipset
系统初始化
vim /etc/hosts
== == == == == == == == == == ==
192.168.1.21 master
192.168.1.31 node-0001
192.168.1.32 node-0002
192.168.1.33 node-0003
192.168.1.100 registry
== == == == == == == == == == == == == ==
kubeadm init --dry-run
使用kubeadm部署
mkdir init; cd init
vim kubeadm-init.yaml
== == == == == == == == == == == == == == =
# --- InitConfiguration: node-local settings for "kubeadm init" ---
apiVersion: kubeadm.k8s.io/v1beta2
# Pre-shared bootstrap token worker nodes use to join the cluster.
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: abcdef.0123456789abcdef
ttl: 24h0m0s
usages:
- signing
- authentication
kind: InitConfiguration
# API server advertises on the master's address, default secure port 6443.
localAPIEndpoint:
advertiseAddress: 192.168.1.21
bindPort: 6443
nodeRegistration:
# Docker runtime via the dockershim socket.
criSocket: /var/run/dockershim.sock
name: master
# Taint keeps ordinary workloads off the control-plane node.
taints:
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
# --- ClusterConfiguration: cluster-wide settings ---
apiServer:
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: { }
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/etcd
# Pull all control-plane images from the private registry, not k8s.gcr.io.
imageRepository: 192.168.1.100:5000
kind: ClusterConfiguration
kubernetesVersion: v1.17.6
networking:
dnsDomain: cluster.local
# Pod CIDR must match the "Network" value in kube-flannel.yml.
podSubnet: 10.244.0.0/16
serviceSubnet: 10.254.0.0/16
scheduler: { }
---
# --- KubeProxyConfiguration: run kube-proxy in IPVS mode ---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
== == == == == == == == == == == == == == == == == =
kubeadm init --config=kubeadm-init.yaml | tee master-init.log
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
验证安装结果
kubectl version
kubectl get componentstatuses
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy { "health" : "true" }
计算节点安装
获取token
kubeadm token create --ttl=0 --print-join-command
kubeadm token list
TOKEN .. .. .
abcdef.0123456789abcdef .. .. .. .
openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der | openssl dgst -sha256 -hex
writing RSA key
(stdin)= 89d0862336f4a2d995331fcdbe5c311edb80a7ddcbe98fd7f0f9848be8a402f6
node安装
cd node-install/
vim ansible.cfg
== == == == == == == == == == == == == == == == == == == =
[defaults]
inventory = hostlist.yaml
host_key_checking = False
== == == == == == == == == == == == == == == == == == == == == =
vim hostlist.yaml
== == == == == == == == == == == == == ==
all:
children:
nodes:
hosts:
192.168.1.31: { }
192.168.1.32: { }
192.168.1.33: { }
ungrouped: { }
== == == == == == == == == == == == == == == == =
mkdir files
vim files/hosts
== == == == == == == == == == == == == == == == =
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
192.168.1.21 master
192.168.1.31 node-0001
192.168.1.32 node-0002
192.168.1.33 node-0003
192.168.1.100 registry
== == == == == == == == == == == == == == == == == ==
vim files/daemon.json
== == == == == == == == == == == == == == ==
{
"exec-opts" : [ "native.cgroupdriver=systemd" ] ,
"registry-mirrors" : [ "https://hub-mirror.c.163.com" ] ,
"insecure-registries" :[ "192.168.1.100:5000" , "registry:5000" ]
}
== == == == == == == == == == == == == == == == == =
vim files/k8s.conf
== == == == == == == == == == == == == ==
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
== == == == == == == == == == == == == == == == =
vim node_install.yaml
== == == == == == == == == == == == == ==
---
# Playbook: prepare every host in group "nodes" as a k8s worker and join it
# to the cluster at {{ master }} using the pre-shared bootstrap token.
- name: Install and join Kubernetes worker nodes
  hosts:
    - nodes
  vars:
    master: '192.168.1.21:6443'
    token: 'abcdef.0123456789abcdef'
    token_hash: 'sha256:89d0862336f4a2d995331fcdbe5c311edb80a7ddcbe98fd7f0f9848be8a402f6'
  tasks:
    # kubelet refuses to start with swap enabled: drop the fstab entry
    # (persistent) and swapoff via handler (immediate).
    - name: disable swap
      lineinfile:
        path: /etc/fstab
        regexp: 'swap'
        state: absent
      notify: disable swap
    - name: Ensure SELinux is set to disabled mode
      lineinfile:
        path: /etc/selinux/config
        regexp: '^SELINUX='
        line: SELINUX=disabled   # no space around '='; "SELINUX= disabled" is invalid
      notify: disable selinux
    # firewalld conflicts with kube-proxy's iptables/IPVS rules.
    - name: remove the firewalld
      yum:
        name:
          - firewalld
          - firewalld-filesystem
        state: absent
    - name: install k8s node tools
      yum:
        name:
          - kubeadm
          - kubelet
          - docker-ce
          - ipvsadm
          - ipset
        state: present
        update_cache: yes
    - name: Create a directory if it does not exist
      file:
        path: /etc/docker
        state: directory
        mode: '0755'
    - name: Copy file with /etc/hosts
      copy:
        src: files/hosts
        dest: /etc/hosts
        owner: root
        group: root
        mode: '0644'
    - name: Copy file with /etc/docker/daemon.json
      copy:
        src: files/daemon.json
        dest: /etc/docker/daemon.json
        owner: root
        group: root
        mode: '0644'
    - name: Copy file with /etc/sysctl.d/k8s.conf
      copy:
        src: files/k8s.conf
        dest: /etc/sysctl.d/k8s.conf
        owner: root
        group: root
        mode: '0644'
      notify: enable sysctl args
    - name: enable k8s node service
      service:
        name: "{{ item }}"
        state: started
        enabled: yes
      with_items:
        - docker
        - kubelet
    # /etc/kubernetes/kubelet.conf only exists after a successful join;
    # its presence makes the join step idempotent on re-runs.
    - name: check node state
      stat:
        path: /etc/kubernetes/kubelet.conf
      register: result
    - name: node join
      shell: kubeadm join '{{ master }}' --token '{{ token }}' --discovery-token-ca-cert-hash '{{ token_hash }}'
      when: not result.stat.exists
  handlers:
    - name: disable swap
      shell: swapoff -a
    - name: disable selinux
      shell: setenforce 0
    - name: enable sysctl args
      shell: sysctl --system
== == == == == == == == == == == == == == == == == == =
ansible-playbook node_install.yaml
验证节点安装情况
kubectl get nodes
NAME STATUS ROLES AGE VERSION
master NotReady master 29m v1.17.6
node-0001 NotReady <none> 7m11s v1.17.6
node-0002 NotReady <none> 7m8s v1.17.6
node-0003 NotReady <none> 7m4s v1.17.6
网络插件安装配置
上传flannel镜像到私有仓库
# Stage the flannel image tarball in a work directory and publish it to the
# private registry so nodes can pull it without internet access.
mkdir flannel; cd flannel
cp ../flannel.tar.gz .
docker load -i flannel.tar.gz
# Keep the upstream tag name, only swap the registry host.
docker tag quay.io/coreos/flannel:v0.12.0-amd64 192.168.1.100:5000/flannel:v0.12.0-amd64
docker push 192.168.1.100:5000/flannel:v0.12.0-amd64
修改配置文件并安装
cd flannel
vim kube-flannel.yml
== == == == == == == == == == == == == == =
128: "Network" : "10.244.0.0/16" ,
172: image: 192.168.1.100:5000/flannel:v0.12.0-amd64
186: image: 192.168.1.100:5000/flannel:v0.12.0-amd64
227-结尾: 删除 嵌入式系统的配置
== == == == == == == == == == == == == == == == ==
kubectl apply -f kube-flannel.yml
验证结果
kubectl get nodes
master Ready master 29m v1.17.6
node-0001 Ready <none> 7m11s v1.17.6
node-0002 Ready <none> 7m8s v1.17.6
node-0003 Ready <none> 7m4s v1.17.6