OpenEular(欧拉)操作系统23.03搭建kubernetes

kubeadm安装Kubernetes v1.24.0 docker

1. 环境准备

1、配置hosts

# Make the cluster nodes resolvable by hostname on every machine.
printf '%s\n' \
  '172.30.28.163  master-163' \
  '172.30.28.164  node-164' >> /etc/hosts

2.关闭防火墙、selinux和swap。

# Stop and disable the firewall so kubeadm/CNI traffic is not blocked.
systemctl stop firewalld
systemctl disable firewalld
setenforce 0 # disable SELinux immediately (runtime only)
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config # keep SELinux disabled after reboot
swapoff -a # turn swap off immediately (kubelet refuses to start with swap on)
sed -i '/ swap / s/^/#/' /etc/fstab # keep swap off after reboot

3.配置内核参数,将桥接的IPv4流量传递到iptables的链

# Kernel parameters required by Kubernetes: pass bridged IPv4/IPv6 traffic
# through the iptables chains and enable IP forwarding.
cat <<EOF >/etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF

# br_netfilter must be loaded, otherwise applying k8s.conf fails on the
# net.bridge.* keys.
modprobe br_netfilter
# modprobe alone does not survive a reboot: register the module with
# systemd-modules-load so the sysctl settings above keep applying.
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf

sysctl -p /etc/sysctl.d/k8s.conf # apply the settings now

4.安装常用工具

# Refresh yum metadata and install tools required by kubeadm's preflight
# checks (crictl, socat, conntrack).
yum clean all 
yum makecache
yum update -y
yum install -y  crictl socat conntrack

5.主机时间同步

# Host time synchronization: install ntpd and sync against Aliyun's NTP
# server (consistent clocks are required for certificate validation).
yum install -y ntp
systemctl enable ntpd && systemctl start ntpd
cat <<EOF >/etc/ntp.conf 
driftfile /var/lib/ntp/drift
restrict default nomodify notrap nopeer noquery
restrict 127.0.0.1
restrict ::1
server ntp.aliyun.com iburst
includefile /etc/ntp/crypto/pw
keys /etc/ntp/keys
disable monitor
EOF
# Restart so the rewritten configuration takes effect.
systemctl restart ntpd

2. 软件安装

1.安装docker

# Step 1: install required system tools
yum install -y device-mapper-persistent-data lvm2 crictl
# Step 2: install and start the Docker service
yum -y install docker
systemctl start docker
# Step 3: start Docker on boot
systemctl enable docker

# Set the cgroup driver to systemd (must match the kubelet's
# --cgroup-driver) and add a registry mirror for faster image pulls.
cat > /etc/docker/daemon.json << EOF
{
    "exec-opts": ["native.cgroupdriver=systemd"],
    "registry-mirrors":["https://docker.mirrors.ustc.edu.cn/"]
}
EOF

systemctl daemon-reload
systemctl restart docker

docker服务为容器运行提供计算资源,是所有容器运行的基本平台。

2.安装ipset、ipvsadm

# Install the ipvs userspace tools (kube-proxy's ipvs mode needs them).
yum install -y ipset ipvsadm

# Script that loads the ipvs kernel modules.
cat > /etc/sysconfig/modules/ipvs.module <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_sh
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- nf_conntrack
EOF

# Make it executable and load the modules now.
chmod 755 /etc/sysconfig/modules/ipvs.module
/etc/sysconfig/modules/ipvs.module

# /etc/sysconfig/modules/ is a legacy SysV-init convention that systemd
# never executes, so the modules above would be gone after a reboot.
# Register them with systemd-modules-load for persistence.
cat > /etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_sh
ip_vs_rr
ip_vs_wrr
nf_conntrack
EOF

3.安装cri-docker

# Download the cri-dockerd release matching the host architecture.
# The original downloaded and extracted BOTH the amd64 and the arm64
# tarballs; the arm64 extraction overwrote the amd64 binary, so an x86
# host ended up installing the wrong-architecture executable.
case "$(uname -m)" in
  x86_64)  CRI_ARCH=amd64 ;;
  aarch64) CRI_ARCH=arm64 ;;
  *) echo "unsupported architecture: $(uname -m)" >&2; exit 1 ;;
esac
wget "https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.4/cri-dockerd-0.3.4.${CRI_ARCH}.tgz"
tar -xvf "cri-dockerd-0.3.4.${CRI_ARCH}.tgz"

# Install the binary.
cp cri-dockerd/cri-dockerd /usr/bin/

# Write the systemd service unit for cri-dockerd.
# NOTE(review): --container-runtime-endpoint below is
# unix:///var/run/cri-docker.sock while the companion socket unit listens
# on /var/run/cri-dockerd.sock — the two paths disagree. kubeadm's
# criSocket in this guide uses cri-docker.sock (the endpoint the daemon
# itself serves), so the cluster works, but the paths should be unified.
cat <<"EOF" > /usr/lib/systemd/system/cri-docker.service
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket

[Service]
Type=notify

ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint=unix:///var/run/cri-docker.sock --network-plugin=cni --cni-bin-dir=/opt/cni/bin \
          --cni-conf-dir=/etc/cni/net.d --image-pull-progress-deadline=30s --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.7 \
          --docker-endpoint=unix:///var/run/docker.sock --cri-dockerd-root-directory=/var/lib/docker
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always

# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3

# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s

# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity

# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target

EOF

# Write the systemd socket unit for cri-dockerd.
# ListenStream must match the --container-runtime-endpoint configured in
# cri-docker.service and the criSocket used by kubeadm
# (unix:///var/run/cri-docker.sock); the original listened on
# /var/run/cri-dockerd.sock, a path nothing else in this guide referenced.
cat <<"EOF" > /usr/lib/systemd/system/cri-docker.socket
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service

[Socket]
ListenStream=/var/run/cri-docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker

[Install]
WantedBy=sockets.target

EOF

# Start cri-dockerd now, enable it at boot, and verify it is running.
systemctl daemon-reload
systemctl start cri-docker
systemctl enable cri-docker
systemctl status cri-docker

4.安装kubeadm、kubelet、kubectl

# On the master: kubelet + kubeadm + control-plane components.
yum install -y kubernetes-kubelet kubernetes-kubeadm kubernetes-master
# On worker nodes: kubelet + kubeadm + node components.
# (The original installed kubernetes-master here as well, pulling the
# control-plane packages onto every worker.)
yum install -y kubernetes-kubelet kubernetes-kubeadm kubernetes-node

systemctl enable kubelet

# Make the kubelet's cgroup driver match Docker's (systemd).
cat <<EOF > /etc/sysconfig/kubelet
KUBELET_EXTRA_ARGS="--cgroup-driver=systemd"
EOF

Kubelet负责与其他节点集群通信,并进行本节点Pod和容器生命周期的管理。

Kubeadm是Kubernetes的自动化部署工具,降低了部署难度,提高效率。

Kubectl是Kubernetes集群管理工具。

4、部署master 节点

获取默认的初始化参数文件

# Dump kubeadm's default InitConfiguration/ClusterConfiguration for editing.
kubeadm config print init-defaults > init.default.yaml

修改初始化参数文件

apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 172.30.28.163 # this host's IP
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/cri-docker.sock # use cri-dockerd as the CRI
  imagePullPolicy: IfNotPresent
  name: master-163 # this host's hostname
  taints: null
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers # Aliyun mirror of the k8s images
kind: ClusterConfiguration
kubernetesVersion: 1.24.0
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.1.0.0/16 # Service CIDR
  podSubnet: 10.224.0.0/16 # Pod CIDR (must match Calico's CALICO_IPV4POOL_CIDR)
scheduler: {}

# run kube-proxy in ipvs mode
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs

查看和拉取K8S集群需要的镜像

# List and pre-pull the images the cluster needs (from the Aliyun mirror).
kubeadm config images list --config=init.default.yaml
kubeadm config images pull --config=init.default.yaml --v=5

运行kubeadm init命令安装master

# Initialize the control plane with the edited configuration.
kubeadm init --config=init.default.yaml
# If initialization fails, reset and try again
kubeadm reset --cri-socket unix:///var/run/cri-docker.sock

复制配置文件到home目录下

# Install the admin kubeconfig for the current user so kubectl can reach
# the new cluster. Expansions are quoted so paths with spaces and odd
# $HOME values cannot word-split (the original left them unquoted).
mkdir -p "$HOME/.kube"
cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
chown "$(id -u):$(id -g)" "$HOME/.kube/config"

添加节点(在node节点执行)

# Join the worker to the cluster. The API server address must be the
# master's IP, 172.30.28.163 (the original said 172.30.22.163, which
# matches no host defined in this guide).
kubeadm join 172.30.28.163:6443 --token abcdef.0123456789abcdef \
	--discovery-token-ca-cert-hash sha256:6abcc08c10867f175668756b992eaceed2e1ab54d21bebfe14d70107429342ef \
	 --cri-socket unix:///var/run/cri-docker.sock

5、安装calico网络插件

# Download the Calico v3.23 manifest.
wget https://docs.projectcalico.org/v3.23/manifests/calico.yaml

# Set Calico's pod CIDR to match podSubnet in init.default.yaml
vim calico.yaml
- name: CALICO_IPV4POOL_CIDR
  value: "10.224.0.0/16"

kubectl create -f calico.yaml

node为Ready

[root@master-163 ~]# kubectl get node
NAME      STATUS      ROLES           AGE   VERSION
master-163   Ready    control-plane   5m   v1.24.1
node-164     Ready    <none>          4m   v1.24.0
[root@master-163 ~]# kubectl get pod -A 
NAMESPACE     NAME                                     READY   STATUS    RESTARTS      AGE
kube-system   calico-kube-controllers-d569cccf-vfpl5   1/1     Running   2             10m
kube-system   calico-node-8ppfs                        1/1     Running   1             10m
kube-system   calico-node-gfcvp                        1/1     Running   0             10m
kube-system   coredns-74586cf9b6-kn7h5                 1/1     Running   0             15m
kube-system   coredns-74586cf9b6-qdhnn                 1/1     Running   0             15m
kube-system   etcd-master-163                          1/1     Running   0             15m
kube-system   kube-apiserver-master-163                1/1     Running   0             15m
kube-system   kube-controller-manager-master-163       1/1     Running   0             15m
kube-system   kube-proxy-455t4                         1/1     Running   1             14m
kube-system   kube-proxy-4cgsz                         1/1     Running   0             15m
kube-system   kube-scheduler-master-163                1/1     Running   0             15m

6、部署nginx

# Deploy two nginx replicas plus a NodePort service on port 30001.
# NOTE(review): ReplicationController is a legacy resource; a Deployment
# would be preferred on v1.24, though the RC still works as written.
cat >  nginx.yaml  << "EOF"
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: nginx-web
spec:
  replicas: 2
  selector:
    name: nginx
  template:
    metadata:
      labels:
        name: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.19.6
          ports:
            - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-service-nodeport
spec:
  ports:
    - port: 80
      targetPort: 80
      nodePort: 30001
      protocol: TCP
  type: NodePort
  selector:
    name: nginx
EOF

查看nginx svc 端口

[root@master-163 ~]# kubectl create -f nginx.yaml
replicationcontroller/nginx-web created
service/nginx-service-nodeport created
[root@master-163 ~]# kubectl get pods
NAME              READY   STATUS    RESTARTS   AGE
nginx-web-67v2z   1/1     Running   0          9s
nginx-web-cj895   1/1     Running   0          9s
[root@master-163 ~]# kubectl get svc
NAME                     TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)        AGE
kubernetes               ClusterIP   10.1.0.1       <none>        443/TCP        20m
nginx-service-nodeport   NodePort    10.1.213.151   <none>        80:30001/TCP   9s
[root@master-163 ~]# curl 172.30.28.163:30001

  • 0
    点赞
  • 4
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值