1.环境准备
1.1 操作系统(centos 7)
[root@master isshpan]# uname -a
Linux master 3.10.0-957.el7.x86_64 #1 SMP Thu Nov 8 23:39:32 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
[root@master isshpan]# cat /etc/redhat-release
CentOS Linux release 7.6.1810 (Core)
1.2 主机信息
准备三台centos虚拟机,一个master,两个node。
1.3 设置虚拟机主机
设置hostname(/etc/hostname,需要重启),配置相应的hosts信息(/etc/hosts)。
[root@master isshpan]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.200.140 master
192.168.200.142 node1
192.168.200.141 node2
关闭防火墙
[root@master isshpan]# systemctl disable firewalld.service
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@master isshpan]# systemctl stop firewalld.service
关闭selinux,编辑/etc/selinux/config,设置SELINUX=disabled(需重启生效)
[root@localhost isshpan]# vim /etc/selinux/config
systemctl enable ntpd
systemctl start ntpd
2.安装docker
注意此处其实不需要安装docker,因为安装docker可能会导致后来安装kubernetes冲突报错,如果已安装可以先卸载docker。
2.1 添加阿里镜像
[root@master isshpan]# sudo yum install -y yum-utils device-mapper-persistent-data lvm2
[root@master isshpan]# sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
2.2 查看版本并安装docker
[root@master isshpan]# yum list docker-ce --showduplicates
[root@master isshpan]# yum install docker-ce-18.06.3.ce
2.3 启动docker
[root@master isshpan]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@master isshpan]# systemctl start docker
2.4 查看版本
[root@master isshpan]# docker --version
Docker version 18.06.3-ce, build d7080c1
3. 安装和部署etcd
3.1 安装etcd
[root@master isshpan]# yum install etcd -y
3.2 配置etcd
注意在ETCD_LISTEN_CLIENT_URLS中加入本机IP地址(192.168.200.140)
[isshpan@master ~]$ cat /etc/etcd/etcd.conf | grep -v "^#"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_CLIENT_URLS="http://192.168.200.140:2379,http://127.0.0.1:2379"
ETCD_NAME="master"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.200.140:2379,http://master:2379"
3.3 启动集群
[root@master isshpan]# systemctl start etcd
[root@master isshpan]# systemctl enable etcd
[root@master isshpan]# etcdctl set testdir/testkey0 0
0
[root@master isshpan]# etcdctl -C http://master:2379 cluster-health
member 8e9e05c52164694d is healthy: got healthy result from http://192.168.200.140:2379
cluster is healthy
4. 部署master
4.1 安装kubernetes
若docker安装冲突,先卸载docker。
[root@master isshpan]# yum install kubernetes -y
4.2 配置kubernetes
[root@master isshpan]# egrep -v "^$|^#" /etc/kubernetes/apiserver
KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_API_PORT="--port=8080"
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.200.140:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota"
KUBE_API_ARGS=""
[root@master isshpan]# egrep -v "^$|^#" /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://master:8080"
4.3 启动docker
所有节点执行
[root@master isshpan]# chkconfig docker on
Note: Forwarding request to 'systemctl enable docker.service'.
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@master isshpan]# service docker start
Redirecting to /bin/systemctl start docker.service
4.4 启动kubernetes
[root@master isshpan]# systemctl enable kube-apiserver kube-controller-manager kube-scheduler
[root@master isshpan]# systemctl start kube-apiserver kube-controller-manager kube-scheduler
4.5 查看服务端口
[root@master isshpan]# netstat -tnlp
5.部署node
5.1 安装kubernetes
如果docker冲突报错,可以先卸载docker,再安装kubernetes。
[root@node1 isshpan]# yum install kubernetes
5.2 配置kubernetes
[root@node1 isshpan]# egrep -v "^$|^#" /etc/kubernetes/kubelet
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_HOSTNAME="--hostname-override=192.168.200.142"
KUBELET_API_SERVER="--api-servers=http://192.168.200.140:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=""
[root@node1 isshpan]# egrep -v "^$|^#" /etc/kubernetes/config
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://master:8080"
5.3 启动服务
[root@node1 isshpan]# systemctl enable kubelet kube-proxy
[root@node1 isshpan]# systemctl start kubelet kube-proxy
5.4 查看端口
yum install net-tools安装netstat。
[root@node1 isshpan]# netstat -ntlp
5.5 查看状态
master上查看集群节点状态
[root@master isshpan]# kubectl get node
NAME STATUS AGE
192.168.200.141 Ready 1m
192.168.200.142 Ready 14h
6. 配置安装flannel
6.1 安装flannel
所有节点安装
[root@node2 isshpan]# yum install flannel
6.2 配置flannel
所有节点均配置
[root@master isshpan]# cat /etc/sysconfig/flanneld | grep -v "^#"
FLANNEL_ETCD_ENDPOINTS="http://192.168.200.140:2379"
FLANNEL_ETCD_PREFIX="/k8s/network"
[root@node1 isshpan]# cat /etc/sysconfig/flanneld | grep -v "^#"
FLANNEL_ETCD_ENDPOINTS="http://192.168.200.140:2379"
FLANNEL_ETCD_PREFIX="/k8s/network"
master 添加网络
[root@master isshpan]# etcdctl mk /k8s/network/config '{"Network":"172.8.0.0/16"}'
6.3 启动服务
所有节点启动
[root@node2 isshpan]# systemctl enable flanneld
[root@node2 isshpan]# systemctl start flanneld
6.4 重启服务
所有节点重启服务
[root@master isshpan]# for SERVICES in docker kube-apiserver kube-controller-manager kube-scheduler; do systemctl restart $SERVICES ; done
[root@node1 isshpan]# systemctl restart kube-proxy kubelet docker
然后可以通过ip a查看flannel网络
7.运行一个测试示例
7.1 查看集群状态
master节点查看
[isshpan@master ~]$ kubectl get node
NAME STATUS AGE
192.168.200.141 Ready 20m
192.168.200.142 Ready 15h
[isshpan@master ~]$ kubectl get cs
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy {"health":"true"}
7.2 创建示例
[isshpan@master ~]$ kubectl run nginx --image=nginx --replicas=3
deployment "nginx" created
[isshpan@master ~]$ kubectl get all
如果pod状态始终不是running而是准备中,参考一下https://blog.csdn.net/qq_15206589/article/details/81513178
[root@master isshpan]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE
nginx-701339712-jmmst 1/1 Running 3 1h 172.8.50.3 192.168.200.141
nginx-701339712-nx1mg 1/1 Running 3 1h 172.8.93.2 192.168.200.142
nginx-701339712-rlz9m 1/1 Running 3 1h 172.8.50.2 192.168.200.141
[root@master isshpan]# kubectl get svc
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes 10.254.0.1 443/TCP 5d
nginx 10.254.231.198 88:31293/TCP 1h
如果此处一直不通,可以尝试清理和重置iptables规则,然后重新启动相关服务;可能是iptables的转发规则问题导致数据包到不了容器内部。
node节点查看nginx主页:
[root@node1 isshpan]# curl 10.254.231.198:88
网页查看: