# Deploying a Hadoop Cluster on K8S

#### 1. Pull the image

Pull the Hadoop image from Docker Hub:

```sh
[root@k8s-master hadoop]# docker pull kubeguide/hadoop:latest
```
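As an optional sanity check (not part of the original steps), you can confirm the image is now present locally; the image ID listed is the one passed to `docker save` in the next step:

```sh
# list the local copy of the image and note its IMAGE ID
[root@k8s-master hadoop]# docker images kubeguide/hadoop
```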

#### 2. Copy the image to the other nodes

```sh
# save the image to a local tar archive
[root@k8s-master hadoop]# docker save e0af06208032 > hadoop.tar
# copy the archive to the worker nodes
[root@k8s-master hadoop]# scp hadoop.tar k8s-node01:/root/hadoop
[root@k8s-master hadoop]# scp hadoop.tar k8s-node02:/root/hadoop
# load the image on each worker node
[root@k8s-node01 hadoop]# docker load --input hadoop.tar
[root@k8s-node02 hadoop]# docker load --input hadoop.tar
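# (optional check, not in the original steps) confirm the image loaded
# on each worker node; the IMAGE ID should match the one on the master
[root@k8s-node01 hadoop]# docker images kubeguide/hadoop
[root@k8s-node02 hadoop]# docker images kubeguide/hadoop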
```

#### 3. Write hadoop.yaml

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: kube-hadoop-conf
data:
  HDFS_MASTER_SERVICE: hadoop-hdfs-master
  HDOOP_YARN_MASTER: hadoop-yarn-master
---
apiVersion: v1
kind: Service
metadata:
  name: hadoop-hdfs-master
spec:
  type: NodePort
  selector:
    name: hdfs-master
  ports:
    - name: rpc
      port: 9000
      targetPort: 9000
    - name: http
      port: 50070
      targetPort: 50070
      nodePort: 32007
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: hdfs-master
  labels:
    name: hdfs-master
spec:
  replicas: 1
  selector:
    name: hdfs-master
  template:
    metadata:
      labels:
        name: hdfs-master
    spec:
      containers:
        - name: hdfs-master
          image: kubeguide/hadoop:latest
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 9000
            - containerPort: 50070
          env:
            - name: HADOOP_NODE_TYPE
              value: namenode
            - name: HDFS_MASTER_SERVICE
              valueFrom:
                configMapKeyRef:
                  name: kube-hadoop-conf
                  key: HDFS_MASTER_SERVICE
            - name: HDOOP_YARN_MASTER
              valueFrom:
                configMapKeyRef:
                  name: kube-hadoop-conf
                  key: HDOOP_YARN_MASTER
      restartPolicy: Always
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: hadoop-datanode
  labels:
    app: hadoop-datanode
spec:
  replicas: 3
  selector:
    name: hadoop-datanode
  template:
    metadata:
      labels:
        name: hadoop-datanode
    spec:
      containers:
        - name: hadoop-datanode
          image: kubeguide/hadoop:latest
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 9000
            - containerPort: 50070
          env:
            - name: HADOOP_NODE_TYPE
              value: datanode
            - name: HDFS_MASTER_SERVICE
              valueFrom:
                configMapKeyRef:
                  name: kube-hadoop-conf
                  key: HDFS_MASTER_SERVICE
            - name: HDOOP_YARN_MASTER
              valueFrom:
                configMapKeyRef:
                  name: kube-hadoop-conf
                  key: HDOOP_YARN_MASTER
      restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
  name: hadoop-yarn-master
spec:
  type: NodePort
  selector:
    name: yarn-master
  ports:
     - name: "8030"
       port: 8030
     - name: "8031"
       port: 8031
     - name: "8032"
       port: 8032
     - name: http
       port: 8088
       targetPort: 8088
       nodePort: 32088
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: yarn-master
  labels:
    name: yarn-master
spec:
  replicas: 1
  selector:
    name: yarn-master
  template:
    metadata:
      labels:
        name: yarn-master
    spec:
      containers:
        - name: yarn-master
          image: kubeguide/hadoop:latest
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 8030
            - containerPort: 8031
            - containerPort: 8032
            - containerPort: 8088
          env:
            - name: HADOOP_NODE_TYPE
              value: resourceman
            - name: HDFS_MASTER_SERVICE
              valueFrom:
                configMapKeyRef:
                  name: kube-hadoop-conf
                  key: HDFS_MASTER_SERVICE
            - name: HDOOP_YARN_MASTER
              valueFrom:
                configMapKeyRef:
                  name: kube-hadoop-conf
                  key: HDOOP_YARN_MASTER
      restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
  name: yarn-node
spec:
  clusterIP: None
  selector:
    name: yarn-node
  ports:
     - port: 8040
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: yarn-node
  labels:
    name: yarn-node
spec:
  replicas: 3
  selector:
    name: yarn-node
  template:
    metadata:
      labels:
        name: yarn-node
    spec:
      containers:
        - name: yarn-node
          image: kubeguide/hadoop:latest
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 8040
            - containerPort: 8041
            - containerPort: 8042
          env:
            - name: HADOOP_NODE_TYPE
              value: yarnnode
            - name: HDFS_MASTER_SERVICE
              valueFrom:
                configMapKeyRef:
                  name: kube-hadoop-conf
                  key: HDFS_MASTER_SERVICE
            - name: HDOOP_YARN_MASTER
              valueFrom:
                configMapKeyRef:
                  name: kube-hadoop-conf
                  key: HDOOP_YARN_MASTER
      restartPolicy: Always
```

This YAML file defines one ConfigMap, three Services, and four ReplicationControllers (eight Pods in total). Note that HDFS_MASTER_SERVICE and HDOOP_YARN_MASTER in the ConfigMap must be set to the Service names, not IP addresses; otherwise the datanodes will not be able to connect to the namenode.
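If the datanodes still cannot register with the namenode, a quick check is to make sure the hadoop-hdfs-master Service has an endpoint and that its name resolves from inside a datanode Pod. This is only a sketch, not part of the original procedure; it assumes cluster DNS is working and that nslookup is available in the image:

```sh
# the Service should list the namenode Pod IP as its endpoint
[root@k8s-master hadoop]# kubectl get endpoints hadoop-hdfs-master

# resolve the Service name from inside one of the datanode Pods
[root@k8s-master hadoop]# DN_POD=$(kubectl get pod -l name=hadoop-datanode -o jsonpath='{.items[0].metadata.name}')
[root@k8s-master hadoop]# kubectl exec $DN_POD -- nslookup hadoop-hdfs-master
```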

#### 4. Run the create command

```sh
[root@k8s-master hadoop]# kubectl create -f hadoop.yaml
```
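It can take a little while for all eight Pods to reach Running. As a convenience (these commands are not part of the original steps), you can watch the rollout and confirm the NodePort Services exist:

```sh
# all hdfs/yarn Pods should eventually show STATUS Running
[root@k8s-master hadoop]# kubectl get pods -o wide

# the HDFS UI is exposed on NodePort 32007, the YARN UI on 32088
[root@k8s-master hadoop]# kubectl get svc
```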
#### 5. Open the web UIs

Once everything has been created, the familiar HDFS management console (and the YARN console) can be reached in a browser through the NodePorts:

- HDFS NameNode UI: http://ip:32007
- YARN ResourceManager UI: http://ip:32088
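As a quick command-line check (only a sketch; replace ip with the address of any cluster node), both NodePorts should answer HTTP requests once the master Pods are up:

```sh
# HEAD requests against the two web UIs
[root@k8s-master hadoop]# curl -I http://ip:32007
[root@k8s-master hadoop]# curl -I http://ip:32088
```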

Note: this approach does not solve the persistent-storage problem; HDFS data lives inside the containers and is lost when a Pod is recreated.
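To see what that means in practice, here is a minimal sketch (assuming the hadoop client is on the PATH inside the kubeguide/hadoop container, which the original article does not confirm): anything written into HDFS disappears after the namenode Pod is recreated by its ReplicationController:

```sh
# write a marker directory into HDFS via the namenode Pod
[root@k8s-master hadoop]# NN_POD=$(kubectl get pod -l name=hdfs-master -o jsonpath='{.items[0].metadata.name}')
[root@k8s-master hadoop]# kubectl exec $NN_POD -- hadoop fs -mkdir /persistence-test

# delete the Pod; the ReplicationController recreates it, but the new
# container starts with a freshly formatted, empty HDFS
[root@k8s-master hadoop]# kubectl delete pod $NN_POD
# once the replacement Pod is Running again, the marker is gone
[root@k8s-master hadoop]# kubectl exec $(kubectl get pod -l name=hdfs-master -o jsonpath='{.items[0].metadata.name}') -- hadoop fs -ls /
```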

