Using GPUs in a Kubernetes cluster and installing Kubeflow 1.0.RC

Steps for enabling GPUs in a Kubernetes cluster and installing Kubeflow 1.0.RC

Install the GPU driver

Install CUDA
sudo yum-config-manager --add-repo http://developer.download.nvidia.com/compute/cuda/repos/rhel7/x86_64/cuda-rhel7.repo
sudo yum clean all
sudo yum -y install nvidia-driver-latest-dkms cuda
sudo yum -y install cuda-drivers

If the gcc dependency is missing, run the following command:

  yum install kernel-devel kernel-doc kernel-headers gcc\* glibc\*  glibc-\*
Install the NVIDIA driver
 rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
 rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
 yum install -y kmod-nvidia
Disable nouveau
### Add rdblacklist=nouveau to GRUB_CMDLINE_LINUX
echo -e "blacklist nouveau\noptions nouveau modeset=0" > /etc/modprobe.d/blacklist.conf

Reboot, then check whether nouveau has been disabled:

lsmod|grep nouv
If there is no output, nouveau has been disabled successfully.
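The GRUB change mentioned above can be made as in this rough sketch for CentOS 7 (on EFI systems the grub.cfg path differs, e.g. /boot/efi/EFI/centos/grub.cfg):

# Append rdblacklist=nouveau to GRUB_CMDLINE_LINUX in /etc/default/grub
sed -i 's/^GRUB_CMDLINE_LINUX="\(.*\)"/GRUB_CMDLINE_LINUX="\1 rdblacklist=nouveau"/' /etc/default/grub
# Regenerate the GRUB config and the initramfs so the blacklist takes effect
grub2-mkconfig -o /boot/grub2/grub.cfg
dracut --force
reboot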

Check the server's GPU information

[root@master ~]# nvidia-smi
Tue Jan 14 03:46:41 2020
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 440.44       Driver Version: 440.44       CUDA Version: 10.2     |
|-------------------------------+----------------------+----------------------+
| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
|===============================+======================+======================|
|   0  Tesla T4            Off  | 00000000:18:00.0 Off |                    0 |
| N/A   29C    P8    10W /  70W |      0MiB / 15109MiB |      0%      Default |
+-------------------------------+----------------------+----------------------+
|   1  Tesla T4            Off  | 00000000:86:00.0 Off |                    0 |
| N/A   25C    P8     9W /  70W |      0MiB / 15109MiB |      0%      Default |
+-------------------------------+----------------------+----------------------+

+-----------------------------------------------------------------------------+
| Processes:                                                       GPU Memory |
|  GPU       PID   Type   Process name                             Usage      |
|=============================================================================|
|  No running processes found                                                 |
+-----------------------------------------------------------------------------+

Install nvidia-docker

Download the nvidia-docker.repo file
curl -s -L https://nvidia.github.io/nvidia-docker/centos7/x86_64/nvidia-docker.repo | sudo tee /etc/yum.repos.d/nvidia-docker.repo  
Search for available nvidia-docker versions
yum search --showduplicates nvidia-docker
Install nvidia-docker

The Docker version here is docker-ce 18.09.7, so install the following nvidia-docker version:

yum install -y nvidia-docker2
pkill -SIGHUP dockerd

Run nvidia-docker version to check the installed nvidia-docker version.

Set the default Docker runtime to nvidia
[root@ks-allinone ~]# cat /etc/docker/daemon.json
{
    "default-runtime": "nvidia",
    "runtimes": {
        "nvidia": {
            "path": "nvidia-container-runtime",
            "runtimeArgs": []
        }
    },
 "registry-mirrors": ["https://o96k4rm0.mirror.aliyuncs.com"]
}

Restart Docker and Kubernetes

systemctl daemon-reload
systemctl restart docker.service
systemctl restart kubelet
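To confirm that containers can now reach the GPUs, run nvidia-smi inside a CUDA container; a minimal check, assuming the nvidia/cuda:10.2-base image is available or pullable:

docker run --rm --runtime=nvidia nvidia/cuda:10.2-base nvidia-smi

The output should list the same Tesla T4 cards shown above.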
Install gpushare-scheduler-extender
cd /etc/kubernetes/
curl -O https://raw.githubusercontent.com/AliyunContainerService/gpushare-scheduler-extender/master/config/scheduler-policy-config.json
cd /tmp/
curl -O https://raw.githubusercontent.com/AliyunContainerService/gpushare-scheduler-extender/master/config/gpushare-schd-extender.yaml
kubectl create -f gpushare-schd-extender.yaml
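The scheduler-policy-config.json downloaded to /etc/kubernetes above still needs to be wired into kube-scheduler. On a kubeadm cluster this means editing the static pod manifest; the additions below follow the upstream gpushare-scheduler-extender install guide and may need adjusting for your cluster:

# /etc/kubernetes/manifests/kube-scheduler.yaml (excerpt)
spec:
  containers:
  - command:
    - kube-scheduler
    - --policy-config-file=/etc/kubernetes/scheduler-policy-config.json
    volumeMounts:
    - mountPath: /etc/kubernetes/scheduler-policy-config.json
      name: scheduler-policy-config
      readOnly: true
  volumes:
  - hostPath:
      path: /etc/kubernetes/scheduler-policy-config.json
      type: FileOrCreate
    name: scheduler-policy-config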
Install the device plugin RBAC
kubectl create -f device-plugin-rbac.yaml
# rbac.yaml
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: gpushare-device-plugin
rules:
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - update
  - patch
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
  - update
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: gpushare-device-plugin
  namespace: kube-system
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: gpushare-device-plugin
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: gpushare-device-plugin
subjects:
- kind: ServiceAccount
  name: gpushare-device-plugin
  namespace: kube-system
Install the device-plugin-ds DaemonSet
kubectl create -f device-plugin-ds.yaml

apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: gpushare-device-plugin-ds
  namespace: kube-system
spec:
  template:
    metadata:
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ""
      labels:
        component: gpushare-device-plugin
        app: gpushare
        name: gpushare-device-plugin-ds
    spec:
      serviceAccount: gpushare-device-plugin
      hostNetwork: true
      nodeSelector:
        gpushare: "true"
      containers:
      - image: registry.cn-hangzhou.aliyuncs.com/acs/k8s-gpushare-plugin:v2-1.11-35eccab
        name: gpushare
        # Make this pod as Guaranteed pod which will never be evicted because of node's resource consumption.
        command:
          - gpushare-device-plugin-v2
          - -logtostderr
          - --v=5
          #- --memory-unit=Mi
        resources:
          limits:
            memory: "300Mi"
            cpu: "1"
          requests:
            memory: "300Mi"
            cpu: "1"
        env:
        - name: KUBECONFIG
          value: /etc/kubernetes/kubelet.conf
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop: ["ALL"]
        volumeMounts:
          - name: device-plugin
            mountPath: /var/lib/kubelet/device-plugins
      volumes:
        - name: device-plugin
          hostPath:
            path: /var/lib/kubelet/device-plugins

References:
https://github.com/AliyunContainerService/gpushare-scheduler-extender
https://github.com/AliyunContainerService/gpushare-device-plugin

Label the GPU-sharing nodes with gpushare

kubectl label node mynode gpushare=true
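You can confirm the label took effect (mynode is a placeholder for the real node name):

kubectl get nodes -L gpushare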

Install the kubectl extension

curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.12.1/bin/linux/amd64/kubectl
chmod +x ./kubectl
sudo mv ./kubectl /usr/bin/kubectl

cd /usr/bin/
wget https://github.com/AliyunContainerService/gpushare-device-plugin/releases/download/v0.3.0/kubectl-inspect-gpushare
chmod u+x /usr/bin/kubectl-inspect-gpushare
kubectl inspect gpushare  ## check cluster GPU usage
Install Kubernetes load balancing with MetalLB (v0.7.3) (optional)
wget https://raw.githubusercontent.com/google/metallb/v0.7.3/manifests/metallb.yaml

kubectl apply -f  metallb.yaml

metallb-config.yaml

apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - 10.18.5.30-10.18.5.50

kubectl apply -f metallb-config.yaml
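Before relying on MetalLB, check that its controller and speaker pods are running (the namespace name comes from the upstream manifest):

kubectl get pods -n metallb-system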
Test with TensorFlow
kubectl apply -f  tensorflow.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: tensorflow-gpu
spec:
  replicas: 1
  template:
    metadata:
      labels:
        name: tensorflow-gpu
    spec:
      containers:
        - name: tensorflow-gpu
          image: tensorflow/tensorflow:1.15.0-py3-jupyter
          imagePullPolicy: Never
          resources:
            limits:
              aliyun.com/gpu-mem: 1024
          ports:
          - containerPort: 8888
---
apiVersion: v1
kind: Service
metadata:
  name: tensorflow-gpu
spec:
  ports:
  - port: 8888
    targetPort: 8888
    nodePort: 30888
    name: jupyter
  selector:
    name: tensorflow-gpu
  type: NodePort
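Once the pod is running, the Jupyter UI is reachable on node port 30888 per the Service above. A quick way to confirm TensorFlow actually sees the shared GPU is to run a check inside the pod; a sketch, assuming the labels from the Deployment above:

POD=$(kubectl get pod -l name=tensorflow-gpu -o jsonpath='{.items[0].metadata.name}')
kubectl exec "$POD" -- python3 -c "import tensorflow as tf; print(tf.test.is_gpu_available())"

This should print True when the aliyun.com/gpu-mem allocation was honored.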

Check cluster GPU usage

[root@master ~]# kubectl inspect gpushare
NAME    IPADDRESS   GPU0(Allocated/Total)  GPU1(Allocated/Total)  GPU Memory(MiB)
master  10.18.5.20  1024/15109              0/15109                1024/30218
node    10.18.5.21  0/15109                0/15109                0/30218
------------------------------------------------------------------
Allocated/Total GPU Memory In Cluster:
1024/60436 (1%)
[root@master ~]#

You can test GPU usage by scaling the number of TensorFlow replicas and by changing the amount of GPU memory each pod requests:

 kubectl scale --current-replicas=1 --replicas=100 deployment/tensorflow-gpu

Testing produced the following results:

Environment

Node    GPU count  GPU memory
master  2          15109M*2=30218M
node    2          15109M*2=30218M

Test results

GPU memory per pod  Pod count  GPU utilization
256M                183        77%
512M                116        98%
1024M               56         94%

Install Kubeflow (v1.0.RC)

Install ks (ksonnet)
  tar -vxf ks_0.12.0_linux_amd64.tar.gz
  cp ks_0.12.0_linux_amd64/* /usr/local/bin/
Install Kubeflow
Download the installation package

kfctl_v1.0-rc.3-1-g24b60e8_linux.tar.gz

tar -zxvf kfctl_v1.0-rc.3-1-g24b60e8_linux.tar.gz
cp kfctl  /usr/bin/
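A quick sanity check that both binaries are on the PATH:

ks version
kfctl version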

Preparation: create the PVs and PVCs, using NFS as the file storage

Create a StorageClass

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: local-path
  namespace: kubeflow
#provisioner: example.com/nfs
provisioner: kubernetes.io/gce-pd
parameters:
  type: pd-ssd

kubectl create -f storage.yml
yum install nfs-utils rpcbind
# Create the NFS export directories (at least four are needed)
mkdir -p /data/nfs

vim /etc/exports
# Add the export directory created above
/data/nfs 192.168.122.0/24(rw,sync)

systemctl restart nfs-server.service
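Since at least four export directories are needed (one per PV below), the exports can be laid out as in this sketch; the subdirectory names are hypothetical, so adjust them and the allowed subnet to your environment:

mkdir -p /data/nfs/{katib-mysql,metadata-mysql,minio,mysql}
cat >> /etc/exports <<'EOF'
/data/nfs/katib-mysql 192.168.122.0/24(rw,sync)
/data/nfs/metadata-mysql 192.168.122.0/24(rw,sync)
/data/nfs/minio 192.168.122.0/24(rw,sync)
/data/nfs/mysql 192.168.122.0/24(rw,sync)
EOF
exportfs -ra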

Create the PVs. Because multiple pods may mount files with the same names, it is best to create several PVs and let each pod pick its own (at least four, for katib-mysql, metadata-mysql, minio, and mysql respectively).

[root@master pv]# cat mysql-pv.yml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: local-path  # change this for each PVC
spec:
  capacity:
    storage: 200Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Recycle
  storageClassName: local-path
  nfs:
    path: /data/nfs # change this for each PVC
    server: 10.18.5.20

Create the kubeflow-anonymous namespace

kubectl create namespace kubeflow-anonymous

Download the Kubeflow 1.0.RC yml file: https://github.com/kubeflow/manifests/blob/v1.0-branch/kfdef/kfctl_k8s_istio.yaml

[root@master 2020-0219]# cat kfctl_k8s_istio.yaml
apiVersion: kfdef.apps.kubeflow.org/v1
kind: KfDef
metadata:
  clusterName: kubernetes
  creationTimestamp: null
  name: 2020-0219
  namespace: kubeflow
spec:
  applications:
  - kustomizeConfig:
      parameters:
      - name: namespace
        value: istio-system
      repoRef:
        name: manifests
        path: istio/istio-crds
    name: istio-crds
  - kustomizeConfig:
      parameters:
      - name: namespace
        value: istio-system
      repoRef:
        name: manifests
        path: istio/istio-install
    name: istio-install
  - kustomizeConfig:
      parameters:
      - name: namespace
        value: istio-system
      repoRef:
        name: manifests
        path: istio/cluster-local-gateway
    name: cluster-local-gateway
  - kustomizeConfig:
      parameters:
      - name: clusterRbacConfig
        value: "OFF"
      repoRef:
        name: manifests
        path: istio/istio
    name: istio
  - kustomizeConfig:
      parameters:
      - name: namespace
        value: istio-system
      repoRef:
        name: manifests
        path: istio/add-anonymous-user-filter
    name: add-anonymous-user-filter
  - kustomizeConfig:
      repoRef:
        name: manifests
        path: application/application-crds
    name: application-crds
  - kustomizeConfig:
      overlays:
      - application
      repoRef:
        name: manifests
        path: application/application
    name: application
  - kustomizeConfig:
      parameters:
      - name: namespace
        value: cert-manager
      repoRef:
        name: manifests
        path: cert-manager/cert-manager-crds
    name: cert-manager-crds
  - kustomizeConfig:
      parameters:
      - name: namespace
        value: kube-system
      repoRef:
        name: manifests
        path: cert-manager/cert-manager-kube-system-resources
    name: cert-manager-kube-system-resources
  - kustomizeConfig:
      overlays:
      - self-signed
      - application
      parameters:
      - name: namespace
        value: cert-manager
      repoRef:
        name: manifests
        path: cert-manager/cert-manager
    name: cert-manager
  - kustomizeConfig:
      repoRef:
        name: manifests
        path: metacontroller
    name: metacontroller
  - kustomizeConfig:
      overlays:
      - istio
      - application
      repoRef:
        name: manifests
        path: argo
    name: argo
  - kustomizeConfig:
      repoRef:
        name: manifests
        path: kubeflow-roles
    name: kubeflow-roles
  - kustomizeConfig:
      overlays:
      - istio
      - application
      repoRef:
        name: manifests
        path: common/centraldashboard
    name: centraldashboard
  - kustomizeConfig:
      overlays:
      - application
      repoRef:
        name: manifests
        path: admission-webhook/bootstrap
    name: bootstrap
  - kustomizeConfig:
      overlays:
      - application
      repoRef:
        name: manifests
        path: admission-webhook/webhook
    name: webhook
  - kustomizeConfig:
      overlays:
      - istio
      - application
      parameters:
      - name: userid-header
        value: kubeflow-userid
      repoRef:
        name: manifests
        path: jupyter/jupyter-web-app
    name: jupyter-web-app
  - kustomizeConfig:
      overlays:
      - application
      repoRef:
        name: manifests
        path: spark/spark-operator
    name: spark-operator
  - kustomizeConfig:
      overlays:
      - istio
      - application
      - db
      repoRef:
        name: manifests
        path: metadata
    name: metadata
  - kustomizeConfig:
      overlays:
      - istio
      - application
      repoRef:
        name: manifests
        path: jupyter/notebook-controller
    name: notebook-controller
  - kustomizeConfig:
      overlays:
      - application
      repoRef:
        name: manifests
        path: pytorch-job/pytorch-job-crds
    name: pytorch-job-crds
  - kustomizeConfig:
      overlays:
      - application
      repoRef:
        name: manifests
        path: pytorch-job/pytorch-operator
    name: pytorch-operator
  - kustomizeConfig:
      overlays:
      - application
      parameters:
      - name: usageId
        value: <randomly-generated-id>
      - name: reportUsage
        value: "true"
      repoRef:
        name: manifests
        path: common/spartakus
    name: spartakus
  - kustomizeConfig:
      overlays:
      - istio
      repoRef:
        name: manifests
        path: tensorboard
    name: tensorboard
  - kustomizeConfig:
      overlays:
      - application
      repoRef:
        name: manifests
        path: tf-training/tf-job-crds
    name: tf-job-crds
  - kustomizeConfig:
      overlays:
      - application
      repoRef:
        name: manifests
        path: tf-training/tf-job-operator
    name: tf-job-operator
  - kustomizeConfig:
      overlays:
      - application
      repoRef:
        name: manifests
        path: katib/katib-crds
    name: katib-crds
  - kustomizeConfig:
      overlays:
      - application
      - istio
      repoRef:
        name: manifests
        path: katib/katib-controller
    name: katib-controller
  - kustomizeConfig:
      overlays:
      - application
      repoRef:
        name: manifests
        path: pipeline/api-service
    name: api-service
  - kustomizeConfig:
      overlays:
      - application
      parameters:
      - name: minioPvcName
        value: minio-pv-claim
      repoRef:
        name: manifests
        path: pipeline/minio
    name: minio
  - kustomizeConfig:
      overlays:
      - application
      parameters:
      - name: mysqlPvcName
        value: mysql-pv-claim
      repoRef:
        name: manifests
        path: pipeline/mysql
    name: mysql
  - kustomizeConfig:
      overlays:
      - application
      repoRef:
        name: manifests
        path: pipeline/persistent-agent
    name: persistent-agent
  - kustomizeConfig:
      overlays:
      - application
      repoRef:
        name: manifests
        path: pipeline/pipelines-runner
    name: pipelines-runner
  - kustomizeConfig:
      overlays:
      - istio
      - application
      repoRef:
        name: manifests
        path: pipeline/pipelines-ui
    name: pipelines-ui
  - kustomizeConfig:
      overlays:
      - application
      repoRef:
        name: manifests
        path: pipeline/pipelines-viewer
    name: pipelines-viewer
  - kustomizeConfig:
      overlays:
      - application
      repoRef:
        name: manifests
        path: pipeline/scheduledworkflow
    name: scheduledworkflow
  - kustomizeConfig:
      overlays:
      - application
      repoRef:
        name: manifests
        path: pipeline/pipeline-visualization-service
    name: pipeline-visualization-service
  - kustomizeConfig:
      overlays:
      - application
      - istio
      parameters:
      - name: admin
        value: johnDoe@acme.com
      repoRef:
        name: manifests
        path: profiles
    name: profiles
  - kustomizeConfig:
      overlays:
      - application
      repoRef:
        name: manifests
        path: seldon/seldon-core-operator
    name: seldon-core-operator
  - kustomizeConfig:
      overlays:
      - application
      parameters:
      - name: namespace
        value: knative-serving
      repoRef:
        name: manifests
        path: knative/knative-serving-crds
    name: knative-crds
  - kustomizeConfig:
      overlays:
      - application
      parameters:
      - name: namespace
        value: knative-serving
      repoRef:
        name: manifests
        path: knative/knative-serving-install
    name: knative-install
  - kustomizeConfig:
      overlays:
      - application
      repoRef:
        name: manifests
        path: kfserving/kfserving-crds
    name: kfserving-crds
  - kustomizeConfig:
      overlays:
      - application
      repoRef:
        name: manifests
        path: kfserving/kfserving-install
    name: kfserving-install
  repos:
  - name: manifests
    uri: https://github.com/kubeflow/manifests/archive/master.tar.gz
  version: master
status:
  reposCache:
  - localPath: '"../.cache/manifests/manifests-master"'
    name: manifests
[root@master 2020-0219]#

# cd into your kubeflow app directory and run
kfctl apply -V -f kfctl_k8s_istio.yaml
# The install downloads config files from GitHub and may fail; retry on failure

A kustomize folder is generated alongside the kubeflow app directory. To keep image pulls from failing after a restart, change every image pull policy to IfNotPresent (see the sketch below),
then run kfctl apply -V -f kfctl_k8s_istio.yaml again.
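A rough one-liner for the bulk imagePullPolicy edit mentioned above (assuming the generated manifests live under ./kustomize and currently say Always):

grep -rl 'imagePullPolicy: Always' kustomize/ | xargs sed -i 's/imagePullPolicy: Always/imagePullPolicy: IfNotPresent/g'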

Check the running status

kubectl get all -n kubeflow
Access the Kubeflow UI through the Istio ingress
# Change the ingress-gateway service type to LoadBalancer
kubectl -n istio-system edit svc istio-ingressgateway
# change the type here to LoadBalancer
selector:
  app: istio-ingressgateway
  istio: ingressgateway
  release: istio
sessionAffinity: None
type: LoadBalancer
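Equivalently, the service type can be patched without opening an editor:

kubectl -n istio-system patch svc istio-ingressgateway -p '{"spec": {"type": "LoadBalancer"}}'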

Save, then check the service again:

[root@master 2020-0219]# kubectl -n istio-system get svc istio-ingressgateway
NAME                   TYPE           CLUSTER-IP     EXTERNAL-IP   PORT(S)                                                                                                                                      AGE
istio-ingressgateway   LoadBalancer   10.98.19.247   10.18.5.30    15020:32230/TCP,80:31380/TCP,443:31390/TCP,31400:31400/TCP,15029:31908/TCP,15030:31864/TCP,15031:31315/TCP,15032:30372/TCP,15443:32631/TCP   42h
[root@master 2020-0219]#

The EXTERNAL-IP is the external access address; open http://10.18.5.30 to reach the Kubeflow home page.

About image pulls: gcr.io images cannot be pulled from within mainland China; they can be pulled as follows:

curl -s https://zhangguanzhang.github.io/bash/pull.sh | bash -s -- <image name>

If that also fails, you can use Alibaba Cloud's manual image-build feature to build the images on overseas servers.
