(所有机子都要操作)机子环境的准备
# --- Host preparation (run on EVERY machine) ---
# Stop and disable the firewall so kubernetes ports are reachable between nodes.
systemctl stop firewalld && systemctl disable firewalld
# Disable SELinux permanently in the config file, and immediately for the
# current boot (the config change alone only applies after a reboot).
sed -i 's/enforcing/disabled/g' /etc/selinux/config
setenforce 0 || true
# Name resolution for the three cluster nodes.
echo "192.168.1.100 master" >> /etc/hosts
echo "192.168.1.110 node1" >> /etc/hosts
echo "192.168.1.120 node2" >> /etc/hosts
# Set the hostname — run ONLY the one line that matches the machine you are on
# (running all three in sequence would leave every machine named "node2"):
hostnamectl set-hostname master   # on the master machine only
hostnamectl set-hostname node1    # on node1 only
hostnamectl set-hostname node2    # on node2 only
# Disable swap now AND persistently: `swapoff -a` alone does not survive the
# reboot below, and kubelet refuses to start while swap is enabled.
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
# Reboot so the SELinux/hostname/swap changes fully take effect.
reboot
(所有机子都要操作)安装最新的docker-ce,并设置好阿里的镜像加速器
# Configure the yum repo and install docker-ce (run on EVERY machine).
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum makecache fast
yum list | grep docker-ce
yum -y install docker-ce
# Configure the Aliyun registry mirror (image pull accelerator).
mkdir -p /etc/docker
tee /etc/docker/daemon.json <<-'EOF'
{
"registry-mirrors": ["https://jdnefphb.mirror.aliyuncs.com"]
}
EOF
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
(所有机子都要操作)安装kubeadm,kubelet,kubectl
# Configure the Aliyun Kubernetes yum repo (run on EVERY machine).
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum clean all && yum -y makecache
# Pin kubelet/kubeadm/kubectl to the exact version later passed to
# `kubeadm init --kubernetes-version=v1.18.0`; installing the unpinned
# "latest" packages can pull a release that mismatches the control plane.
yum install -y kubelet-1.18.0 kubeadm-1.18.0 kubectl-1.18.0
systemctl enable kubelet && systemctl start kubelet
(master机子上操作即可)查看kubernetes需要的版本,然后部署上
# List the container images kubeadm needs for this version.
kubeadm config images list
# Initialize the control plane (master only): use the Aliyun image mirror,
# a pod CIDR matching flannel's default (10.244.0.0/16), and pin v1.18.0.
kubeadm init --image-repository=registry.aliyuncs.com/google_containers --pod-network-cidr=10.244.0.0/16 --kubernetes-version=v1.18.0
#输出效果如下则成功
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.50.100:6443 --token 9fd8ik.vwghkvoeqp8cx7y6 \
--discovery-token-ca-cert-hash sha256:64497132f39241b16fc9f89b0195b716782764044a72ccc8078d19e0ab06dbcf
# As the init output instructs, run these three commands to set up kubectl's
# config (we are root here, so plain cp/chown without sudo).
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
(nodes机子上操作)加入集群
# Paste the `kubeadm join` command printed by the master's init output.
# The token and CA-cert hash are cluster-specific — copy YOUR values, not these.
kubeadm join 192.168.50.100:6443 --token 9fd8ik.vwghkvoeqp8cx7y6 \
--discovery-token-ca-cert-hash sha256:64497132f39241b16fc9f89b0195b716782764044a72ccc8078d19e0ab06dbcf
(master机子上操作)查看集群信息,并加入flannel网络组件
#现在是未加入网络组件的集群状态,可以看到 STATUS 处集群节点都是 NotReady。
kubectl get node
NAME STATUS ROLES AGE VERSION
master NotReady master 3m57s v1.18.1
node1 NotReady <none> 3m23s v1.18.1
node2 NotReady <none> 6s v1.18.1
# The CNI network add-on used here is flannel.
# Apply the manifest directly from GitHub instead of cloning the entire
# repository just to read one file (same manifest, same result).
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
#这时候加入了网络组件后,再查看一下集群信息
kubectl get node
NAME STATUS ROLES AGE VERSION
master Ready master 16m v1.18.1
node1 Ready <none> 15m v1.18.1
node2 Ready <none> 12m v1.18.1
#可以看到 STATUS 都已经 Ready 状态了,那就证明成功了。
常用命令
# --- Frequently used commands ---
# Show cluster node status.
kubectl get node
# List pods in the default namespace.
kubectl get pod
# List every resource in every namespace.
kubectl get all -A
# List pods in a given namespace.
kubectl get pod -n <NameSpace>
# List services. (Fixed typo: was `kubect get svc`.)
kubectl get svc
# List pods in a namespace with extra detail (node, pod IP, ...).
kubectl get pod -o wide -n <NameSpace>
# Show a pod's detailed state and recent events.
kubectl describe pod <podName>
kubectl describe pod <podName> -n <NameSpace>
使用yaml文件格式创建一个pod示例,来一段里程碑式的输出
[root@master k8s_yaml]# vim busybox.yaml
# Minimal Pod manifest: a busybox container that prints a greeting and sleeps.
# (Indentation restored — the flattened form is not valid YAML.)
apiVersion: v1
kind: Pod
metadata:
  name: test-pod
  labels:
    app: test
spec:
  containers:
  - name: test-container
    image: busybox
    command: ['sh','-c','echo hello world! && sleep 3600']
#根据上面写的文件创建pod
[root@master k8s_yaml]# kubectl apply -f busybox.yaml
pod/test-pod created
#查看一下pod是否正常运行
[root@master k8s_yaml]# kubectl get pod
NAME READY STATUS RESTARTS AGE
test-pod 1/1 Running 0 4s
#看下pod输出内容是否正确
[root@master k8s_yaml]# kubectl logs -f test-pod
hello world!
#删除pod,根据刚才写的文件进行删除
[root@master k8s_yaml]# kubectl delete -f busybox.yaml
pod "test-pod" deleted