Installing Kubernetes 1.18.x on a Single CentOS 8 Machine, with NFS Dynamic Storage and MySQL

I recently installed a Kubernetes cluster on a single virtual machine for learning purposes, and deployed an NFS dynamic storage provisioner and MySQL on top of it. Interested readers may find this walkthrough useful.

Software versions:

CentOS: 8.2

Docker: 19.03

Kubernetes: 1.18.6/1.18.3

MySQL: 5.7.22

Virtualization: Hyper-V

Since these versions were fairly new at the time, I ran into a few pitfalls; they are noted below for reference.

1 Install Docker

# Install
# Note: docker-ce on CentOS 8 needed a newer containerd.io than the distro
# provided at the time, hence the Fedora 30 RPM below
curl https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -o /etc/yum.repos.d/docker-ce.repo
yum install https://download.docker.com/linux/fedora/30/x86_64/stable/Packages/containerd.io-1.2.13-3.2.fc30.x86_64.rpm
yum install docker-ce

# Configure a registry mirror; replace xxxxxx with your own Aliyun accelerator id
cat > /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://xxxxxx.mirror.aliyuncs.com"]
}
EOF

# Start the Docker service
systemctl enable docker --now
docker info
docker version

# Replace the IP address below with this machine's own IP address

cat >> /etc/hosts <<EOF
172.30.125.40 sharework
EOF

2 Preparation Before Installing Kubernetes

# Disable the firewall

systemctl stop firewalld && systemctl disable firewalld

setenforce 0
# disable SELinux permanently (setenforce 0 only lasts until reboot); matching the
# SELINUX= line by pattern is more robust than a hardcoded line number
sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config

cat >/etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF

modprobe br_netfilter && sysctl -p /etc/sysctl.d/k8s.conf

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
# on the CentOS 8 kernel, nf_conntrack_ipv4 has been merged into nf_conntrack,
# so loading nf_conntrack_ipv4 would fail here
modprobe -- nf_conntrack
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack

yum -y install ipset ipvsadm

# keep the system clock in sync via an Aliyun NTP server
yum -y install chrony

sed -i.bak '3,6d' /etc/chrony.conf && sed -i '3cserver ntp1.aliyun.com iburst' /etc/chrony.conf
# restart chronyd so the new server takes effect
systemctl restart chronyd

swapoff -a
# comment out every swap entry in fstab (on CentOS 8 the default LVM volume is
# cl-swap rather than centos-swap, so match any swap line)
sed -ri.bak 's/^([^#].*\sswap\s.*)$/#\1/' /etc/fstab
free -m

cat >>/etc/sysctl.d/k8s.conf <<EOF
vm.swappiness=0
EOF

sysctl -p /etc/sysctl.d/k8s.conf

sed -i.bak "s#^ExecStart=/usr/bin/dockerd.*#ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd#g" /usr/lib/systemd/system/docker.service

systemctl daemon-reload && systemctl restart docker
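
# To confirm the change took effect, Docker should now report the systemd cgroup driver:
docker info | grep -i "cgroup driver"   # should print: Cgroup Driver: systemd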

cat >/etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
        http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

3 Install Kubernetes

yum -y install kubelet-1.18.6 kubeadm-1.18.6 kubectl-1.18.6

kubeadm version

systemctl enable kubelet --now

yum -y install bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
echo "source <(kubectl completion bash)" >> ~/.bashrc

4 Configure the Kubernetes Cluster

# At the time of writing, the aliyuncs image repository only provides images for 1.18.3, hence the version pinned below

cat <<EOF > ./kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.18.3
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
controlPlaneEndpoint: "172.30.125.40:6443"
networking:
  serviceSubnet: "10.96.0.0/16"
  podSubnet: "10.20.0.0/16"
  dnsDomain: "cluster.local"
EOF
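
# Optionally, the control-plane images can be pre-pulled before init; this separates
# image-download problems from cluster-bootstrap problems:
kubeadm config images pull --config=kubeadm-config.yaml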

# ensure at least 2 CPUs are available, or kubeadm's preflight check will fail

kubeadm init --config=kubeadm-config.yaml --upload-certs

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
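
# A quick sanity check at this point (the node will report NotReady until the
# Calico CNI is installed in the next step):
kubectl get nodes
kubectl get pods -n kube-system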

 

5 Install Calico

curl https://docs.projectcalico.org/manifests/tigera-operator.yaml -o tigera-operator.yaml
kubectl create -f tigera-operator.yaml
curl https://docs.projectcalico.org/manifests/custom-resources.yaml -o calico-resources.yaml
# replace the default pod cidr 192.168.0.0/16 with the podSubnet configured earlier (10.20.0.0/16)
vi calico-resources.yaml
kubectl create -f calico-resources.yaml

# The tigera-operator image may fail to pull here; if so, fetch it another way and import it, for example:

# on a machine that can reach quay.io:
docker pull quay.io/tigera/operator:v1.7.1
docker save quay.io/tigera/operator:v1.7.1 > tigera-operator-1.7.1.tar.gz
# then on this host, copy the archive over and load it:
scp -P <ssh port> <user name>@<host>:~/tigera-operator-1.7.1.tar.gz .
docker load -i tigera-operator-1.7.1.tar.gz
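
# Once the operator image is available and the resources are created, the Calico
# pods should come up and the node should turn Ready. A minimal check:
kubectl get pods -n calico-system   # the operator creates the Calico pods here
kubectl get nodes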

6 Kubernetes Follow-up Steps

# Install kubens to make switching the current namespace easier

yum install -y git
git clone https://github.com.cnpmjs.org/ahmetb/kubectx
cp kubectx/kubens /usr/local/bin
kubens

 

# Remove the taint from the master so pods can be scheduled onto it

kubectl taint node sharework node-role.kubernetes.io/master-
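
# To verify the taint was removed (the output should be empty or show Taints: <none>):
kubectl describe node sharework | grep -i taints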

 

7 Add and Mount a Disk

# Add a new virtual disk to the VM (in Hyper-V)
# Rescan the SCSI bus so the new disk is detected
echo "- - -" > /sys/class/scsi_host/host0/scan

# Inspect the disks
lsblk
fdisk -l

# Partition the disk

fdisk /dev/sdb # interactive: n, p, 1, accept the defaults, then w

# Format it as XFS
mkfs.xfs /dev/sdb1

# Mount it, and add an fstab entry so the mount survives reboots

mkdir /mnt/sdb
mount /dev/sdb1 /mnt/sdb
# append this line to /etc/fstab:
vi /etc/fstab
/dev/sdb1 /mnt/sdb xfs defaults 0 0
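
# Before relying on the fstab entry, it is worth validating it; mount -a surfaces
# any syntax error immediately:
mount -a
df -h /mnt/sdb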

8 Install NFS

mkdir /mnt/sdb/nfs
yum -y install nfs-utils rpcbind
systemctl enable --now nfs-server rpcbind
cat /proc/fs/nfsd/versions

cat /etc/passwd
# make sure uid 1100 is not already taken (check the output above)
useradd -u 1100 -s /sbin/nologin -M nfsnobody
id nfsnobody

chown nfsnobody /mnt/sdb/nfs
chgrp nfsnobody /mnt/sdb/nfs
chmod 755 /mnt/sdb/nfs
ls -l /mnt/sdb

# export the directory by adding the following line to /etc/exports:
vi /etc/exports
/mnt/sdb/nfs *(rw,sync,no_root_squash,anonuid=1100,anongid=1100)

exportfs -ra
exportfs -v
showmount -e localhost
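
# As an optional end-to-end check, the export can be mounted locally and written to;
# /mnt/nfstest below is just a throwaway directory for the test:
mkdir -p /mnt/nfstest
mount -t nfs localhost:/mnt/sdb/nfs /mnt/nfstest
touch /mnt/nfstest/hello && ls -l /mnt/nfstest
umount /mnt/nfstest && rmdir /mnt/nfstest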

9 Create Dynamic Storage

mkdir -p ./nfs
cat <<EOF > ./nfs/rbac.yaml
---
apiVersion: v1
kind: Namespace
metadata:
  name: nfs
  labels:
    name: nfs

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: nfs
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: nfs
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
EOF

# Adjust the following to match your actual NFS server address and export path

cat <<EOF > ./nfs/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: nfs
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: registry.cn-hangzhou.aliyuncs.com/open-ali/nfs-client-provisioner:latest # quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 172.30.125.40
            - name: NFS_PATH
              value: /mnt/sdb/nfs
      volumes:
        - name: nfs-client-root
          nfs:
            server: 172.30.125.40
            path: /mnt/sdb/nfs
EOF

# replace the NFS server address and export path if yours differ
vi ./nfs/deployment.yaml

cat <<EOF > ./nfs/class.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: fuseim.pri/ifs # or choose another name; it must match the deployment's PROVISIONER_NAME env
reclaimPolicy: Retain
parameters:
  archiveOnDelete: "false"
EOF

kubectl create -f ./nfs/rbac.yaml
kubectl create -f ./nfs/deployment.yaml
kubectl create -f ./nfs/class.yaml
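
# To verify that dynamic provisioning works end to end, a throwaway PVC (test-claim
# is just an illustrative name) can be created against the managed-nfs-storage class;
# it should reach Bound within a few seconds, after which a matching directory
# appears under /mnt/sdb/nfs:
cat <<EOF | kubectl create -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
  storageClassName: managed-nfs-storage
EOF
kubectl get pvc test-claim
kubectl delete pvc test-claim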

10 Install MySQL

 

# Widen the NodePort range Kubernetes allows (the default is 30000-32767)

vi /etc/kubernetes/manifests/kube-apiserver.yaml
# add this flag to the kube-apiserver command:
- --service-node-port-range=10000-65535

systemctl daemon-reload
systemctl restart kubelet
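
# kube-apiserver runs as a static pod, so the kubelet recreates it automatically
# after the manifest changes; confirm it is back up before continuing:
kubectl get pods -n kube-system -l component=kube-apiserver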

# Define the namespace and PVC

mkdir -p ./mysql
cat <<EOF > ./mysql/mysql57-pvc.yaml
---
apiVersion: v1
kind: Namespace
metadata:
  name: mysql57
  labels:
    name: mysql57
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql57-pvc
  namespace: mysql57
  labels:
    app: mysql57-pvc
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 2Gi
  storageClassName: managed-nfs-storage
EOF

# Define the Deployment; change the root password below as needed

cat <<EOF > ./mysql/mysql57-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql57-deploy
  namespace: mysql57
spec:
  replicas: 1
  selector:
    matchLabels:
      name: mysql57-pod
  template:
    metadata:
      labels:
        name: mysql57-pod
    spec:
      containers:
        - name: mysql
          image: mysql:5.7.22
          imagePullPolicy: IfNotPresent
          env:
          - name: MYSQL_ROOT_PASSWORD
            value: xxxxxx    # must not be purely numeric, or YAML parses it as a number rather than a string
          ports:
            - containerPort: 3306
          volumeMounts:
            - name: mysql-persistent-storage
              mountPath: "/var/lib/mysql"
      volumes:
        - name: mysql-persistent-storage
          persistentVolumeClaim:
            claimName: mysql57-pvc
EOF

# Define the Service; change the exposed NodePort as needed

cat <<EOF > ./mysql/mysql57-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: mysql57-svc
  namespace: mysql57
  labels:
    name: mysql57-svc
spec:
  type: NodePort
  ports:
  - port: 3306
    protocol: TCP
    targetPort: 3306
    name: mysql
    nodePort: <mysql internal port>
  selector:
    name: mysql57-pod
EOF

# Deploy MySQL 5.7 to the Kubernetes cluster

kubectl create -f ./mysql/mysql57-pvc.yaml
kubectl create -f ./mysql/mysql57-deployment.yaml
kubectl create -f ./mysql/mysql57-svc.yaml
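
# To check the deployment and find the assigned NodePort, and to log in from inside
# the pod (using the MYSQL_ROOT_PASSWORD set above):
kubectl get pods,svc -n mysql57
POD=$(kubectl -n mysql57 get pod -l name=mysql57-pod -o jsonpath='{.items[0].metadata.name}')
kubectl -n mysql57 exec -it $POD -- mysql -uroot -p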

# Map the service's NodePort from the physical host to the VM; adjust the IP address and ports to your setup.
# For Hyper-V, run the following in a command prompt with Administrator privileges:
netsh interface portproxy add v4tov4 listenport=<mysql port> connectaddress=172.30.125.40 connectport=<mysql internal port>
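
# The mapping can be listed, and later removed, with the same tool:
netsh interface portproxy show v4tov4
netsh interface portproxy delete v4tov4 listenport=<mysql port>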

 

11 Conclusion

With that, MySQL can now be accessed from outside the Kubernetes cluster.
