kubeadm安装最新k8s环境

kubeadm安装最新k8s环境

IP地址(NAT) 主机名 描述
192.168.6.190 k8s-master Master/Etcd节点
192.168.6.191 k8s-node1 Node节点
192.168.6.192 k8s-node2 Node节点

Service网段 10.33.0.0/16
Pod网段 10.44.0.0/16

Node环境部署

标准化网卡

# Disable "predictable" NIC naming so the interface comes back as eth0,
# then rename the ifcfg file to match and rebuild GRUB before rebooting.
sed -i 's/crashkernel=auto/crashkernel=auto net.ifnames=0 biosdevname=0/g' /etc/default/grub
# Assumes the current interface is ens192 -- verify with `ip addr` first.
mv /etc/sysconfig/network-scripts/ifcfg-ens192 /etc/sysconfig/network-scripts/ifcfg-eth0
sed -i 's/ens192/eth0/g' /etc/sysconfig/network-scripts/ifcfg-eth0
# /etc/grub2.cfg is a symlink to /boot/grub2/grub.cfg on CentOS 7.
grub2-mkconfig -o /etc/grub2.cfg
reboot

初始化

# Base OS preparation: disable IPv6, switch yum to Aliyun mirrors, install
# common tools, enable time sync, and turn off SELinux and firewalld
# (both must be off/permissive for this kubeadm setup).
echo "net.ipv6.conf.all.disable_ipv6=1" >> /etc/sysctl.conf
yum install wget telnet -y
# Back up the stock repo file, then fetch Aliyun's CentOS 7 + EPEL mirrors.
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
yum clean all
yum makecache
systemctl disable postfix
yum -y install lrzsz net-tools tree chrony
# Time sync -- etcd and TLS certificates are sensitive to clock skew.
systemctl start chronyd
systemctl enable chronyd
# Disable SELinux both immediately (setenforce) and across reboots (config).
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
setenforce 0
systemctl disable firewalld.service
systemctl stop firewalld.service 

升级内核系统

# Upgrade to the latest mainline kernel from ELRepo.
# Fix: `rpm -import` is not a valid rpm option -- the long form `--import`
# is required; the original would fail before importing the GPG key.
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm
# List and install the mainline kernel (kernel-ml) from elrepo-kernel.
yum --disablerepo="*" --enablerepo="elrepo-kernel" list available
yum -y --enablerepo=elrepo-kernel install kernel-ml.x86_64 kernel-ml-devel.x86_64
# Boot the newly installed kernel: it is menu entry 0 in the GRUB list.
# Fix: the original `sed 's/GRUB_DEFAULT=/GRUB_1/'` produced the broken
# line "GRUB_1saved" instead of selecting a boot entry.
sed -i 's/^GRUB_DEFAULT=.*/GRUB_DEFAULT=0/' /etc/default/grub
grub2-mkconfig -o /boot/grub2/grub.cfg
# `-y` added so the update runs unattended (the original prompted).
yum -y update
reboot

环境配置

# Kernel parameters required by Kubernetes: bridged traffic must traverse
# iptables, and IP forwarding must be on for pod routing.
cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables  = 1
net.ipv4.ip_forward                 = 1
EOF
# br_netfilter must be loaded or the bridge-nf sysctls do not exist yet.
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf

# Fix: `-y` added to the first two installs -- the originals would block
# on an interactive prompt during unattended provisioning (the third line
# already used -y).
yum install -y ipset
yum install -y ipvsadm
yum install -y yum-utils device-mapper-persistent-data lvm2

IPVS负载均衡 使用

# Load the kernel modules kube-proxy needs for IPVS mode; the file under
# /etc/sysconfig/modules/ is re-run on boot by the CentOS 7 init scripts.
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
# nf_conntrack_ipv4 was merged into nf_conntrack in kernel 4.19; after the
# kernel-ml upgrade above the old module name no longer exists, so fall
# back to the new one (the original only tried nf_conntrack_ipv4).
modprobe -- nf_conntrack_ipv4 || modprobe -- nf_conntrack
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
# nf_conntrack matches both the old and the new module name.
lsmod | grep -e ip_vs -e nf_conntrack

安装docker

#################
# Optional: kubeadm may reject the newest Docker release. If so, pin a
# supported version as shown here instead of the plain install below.
# Fix: the version-note lines in the original had no leading `#`, so this
# section was a syntax error when pasted into a shell.
# vim /etc/yum.repos.d/docker-ce.repo
#   (change enabled=0 to enabled=1 under [docker-ce-test] if you need a
#   test-channel build)
# yum makecache
# yum list docker-ce --showduplicates | sort -r
# Install a specific version, e.g.:
#   yum install docker-ce-18.09.9-2.1.rc1.el7
#################

# Install Docker CE from the Aliyun mirror of the docker-ce repo.
wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
yum install -y docker-ce

# Use the systemd cgroup driver so Docker matches the kubelet's driver and
# kubeadm does not warn about a cgroupfs/systemd mismatch.
mkdir -p /etc/docker/
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF

systemctl enable docker
systemctl start docker

安装k8s源

# Kubernetes yum repository (Aliyun mirror).
# NOTE(review): gpgcheck/repo_gpgcheck are disabled here even though the
# keys are listed -- enabling them would be safer; confirm before reuse.
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

安装k8s node组件

# Fix: pin the packages to the version passed to `kubeadm init` below
# (v1.14.1). The original unpinned install pulls the latest release, which
# would mismatch --kubernetes-version and break the init/join flow.
yum install -y kubelet-1.14.1 kubeadm-1.14.1 kubectl-1.14.1
# Enable only -- kubelet crash-loops until `kubeadm init/join` writes its
# configuration, so it is not started here.
systemctl enable kubelet

到这里,Node 基础环境初始化完成。建议为虚拟机做一个快照(还原点)。

克隆两个分别为 node1和node2

更改克隆出来的 node1、node2 的 hostname 和 IP

hostnamectl set-hostname node1
更改IP

k8s-master 安装

# Static name resolution for all cluster members (run on the master).
cat > /etc/hosts <<EOF
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.6.190 k8s-master
192.168.6.191 k8s-node1
192.168.6.192 k8s-node2
EOF


# Bootstrap the control plane. The image repository points at Aliyun's
# mirror of the upstream images; the service and pod CIDRs match the plan
# at the top of this document (10.33.0.0/16 and 10.44.0.0/16 -- the pod
# CIDR must also match the flannel manifest edited below).
kubeadm init \
--apiserver-advertise-address=192.168.6.190 \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.14.1 \
--service-cidr=10.33.0.0/16 \
--pod-network-cidr=10.44.0.0/16

找到这个。很重要。这是node加入master的信息。

# Join command printed by `kubeadm init` (your token/hash will differ).
# Fix: the original paste had a typographic en-dash before
# discovery-token-ca-cert-hash and no `\` continuation on the first line,
# so the command could not run as written.
kubeadm join 192.168.6.190:6443 --token gvxath.030zgc291syups8v \
    --discovery-token-ca-cert-hash sha256:91ca783858fbe9806560e8253ec47fe734addba3c8ee64ddbeace077a5101aee

根据提示运行

# Give the current user a kubeconfig so kubectl can reach the new cluster
# (these are the exact commands kubeadm prints after a successful init).
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

安装flannel 网络

# Deploy the flannel CNI. The upstream manifest ships with Network
# 10.244.0.0/16; rewrite it to this cluster's --pod-network-cidr.
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# Fix: dots escaped and /g added -- in the original pattern each `.`
# matched any character and only the first hit per line was replaced.
sed -i 's/10\.244\.0\.0/10.44.0.0/g' ./kube-flannel.yml
kubectl apply -f kube-flannel.yml

开启IPVS负载均衡

# Switch kube-proxy from the default iptables mode to IPVS: in the editor
# change `mode: ""` to `mode: "ipvs"`, i.e. the ConfigMap should contain:
#     metricsBindAddress: 127.0.0.1:10249
#     mode: "ipvs"
#     nodePortAddresses: null
# Fix: these instructions were bare text between the commands in the
# original, which made the section unrunnable as a script.
kubectl edit cm kube-proxy -n kube-system

# Restart every kube-proxy pod so the new mode takes effect.
kubectl get pod -n kube-system | grep kube-proxy | awk '{system("kubectl delete pod "$1" -n kube-system")}'

查看是否全部运行成功。

kubectl get pod --all-namespaces
#### 查看下面所有必须running.. 这样master 节点安装完成。
NAMESPACE     NAME                                 READY   STATUS    RESTARTS   AGE
kube-system   coredns-8686dcc4fd-9tggk             1/1     Running   0          2m31s
kube-system   coredns-8686dcc4fd-z4d5j             1/1     Running   0          2m31s
kube-system   etcd-k8s-master                      1/1     Running   0          92s
kube-system   kube-apiserver-k8s-master            1/1     Running   0          109s
kube-system   kube-controller-manager-k8s-master   1/1     Running   0          93s
kube-system   kube-flannel-ds-amd64-5b46d          1/1     Running   0          68s
kube-system   kube-proxy-k8lj8                     1/1     Running   0          2m31s
kube-system   kube-scheduler-k8s-master            1/1     Running   0          103s

Node1 Node2 安装

添加hosts 信息

# Same hosts entries as on the master (run on node1 and node2).
cat > /etc/hosts <<EOF
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.6.190 k8s-master
192.168.6.191 k8s-node1
192.168.6.192 k8s-node2
EOF

# Join token and CA-cert hash generated by `kubeadm init` above
# (every cluster's values differ -- use the ones your init printed).
kubeadm join 192.168.6.190:6443 --token gvxath.030zgc291syups8v \
    --discovery-token-ca-cert-hash sha256:91ca783858fbe9806560e8253ec47fe734addba3c8ee64ddbeace077a5101aee 

查看 node是否加入成功

[root@k8s-master ~]# kubectl get node
NAME         STATUS   ROLES    AGE     VERSION
k8s-master   Ready    master   5m19s   v1.14.1
k8s-node1    Ready    <none>   116s    v1.14.1
k8s-node2    Ready    <none>   110s    v1.14.1

给node 打标签。

# Label the workers so `kubectl get nodes` shows a ROLES value -- the key
# suffix after node-role.kubernetes.io/ becomes the displayed role.
kubectl label nodes k8s-node2 node-role.kubernetes.io/node2=  
kubectl label nodes k8s-node1 node-role.kubernetes.io/node1=
[root@k8s-master ~]# kubectl get nodes
NAME         STATUS   ROLES    AGE     VERSION
k8s-master   Ready    master   9m49s   v1.14.1
k8s-node1    Ready    node1    6m26s   v1.14.1
k8s-node2    Ready    node2    6m20s   v1.14.1

删除标签

删除label,只需要在命令行最后指定label的key名,并加一个减号即可:

kubectl label nodes k8s-node2 node-role.kubernetes.io/node2-

下面是一些常用命令

创建一个默认实例

kubectl create deployment nginx --image=nginx:alpine

查看创建的pod详细信息。

kubectl get pod -o wide

更改实例数量

kubectl scale deployment nginx --replicas=2

打开实例的端口

kubectl expose deployment nginx --port=80

查看service信息

kubectl get service

查看网络端口连接等信息

ipvsadm -Ln

查看node信息

kubectl get node

查看hpa信息

kubectl get deployment,svc,hpa

发布了111 篇原创文章 · 获赞 5 · 访问量 1万+
展开阅读全文

没有更多推荐了,返回首页

©️2019 CSDN 皮肤主题: 大白 设计师: CSDN官方博客

分享到微信朋友圈

×

扫一扫,手机浏览